diff --git a/cmd/baton-postgresql/main.go b/cmd/baton-postgresql/main.go
index da0d7766..3635afde 100644
--- a/cmd/baton-postgresql/main.go
+++ b/cmd/baton-postgresql/main.go
@@ -38,7 +38,7 @@ func main() {
func getConnector(ctx context.Context, pgc *cfg.Postgresql) (types.ConnectorServer, error) {
l := ctxzap.Extract(ctx)
- cb, err := connector.New(ctx, pgc.Dsn, pgc.Schemas, pgc.IncludeColumns, pgc.IncludeLargeObjects, pgc.SyncAllDatabases)
+ cb, err := connector.New(ctx, pgc.Dsn, pgc.Schemas, pgc.IncludeColumns, pgc.IncludeLargeObjects, pgc.SyncAllDatabases, pgc.SkipBuiltInFunctions)
if err != nil {
l.Error("error creating connector", zap.Error(err))
return nil, err
diff --git a/go.mod b/go.mod
index 7bc29efd..ff77c3d0 100644
--- a/go.mod
+++ b/go.mod
@@ -3,20 +3,28 @@ module github.com/conductorone/baton-postgresql
go 1.23.4
require (
- github.com/conductorone/baton-sdk v0.2.93
+ github.com/conductorone/baton-sdk v0.3.9
github.com/ennyjfrick/ruleguard-logfatal v0.0.2
github.com/georgysavva/scany v1.2.2
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/jackc/pgconn v1.14.3
github.com/jackc/pgx/v4 v4.18.3
github.com/quasilyte/go-ruleguard/dsl v0.3.22
- github.com/spf13/viper v1.19.0
+ github.com/stretchr/testify v1.10.0
+ github.com/testcontainers/testcontainers-go v0.37.0
+ github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0
+ go.opentelemetry.io/otel v1.35.0
go.uber.org/zap v1.27.0
+ google.golang.org/grpc v1.71.0
)
require (
+ dario.cat/mergo v1.0.1 // indirect
filippo.io/age v1.2.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/aws/aws-lambda-go v1.47.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
@@ -43,11 +51,21 @@ require (
github.com/conductorone/dpop v0.2.3 // indirect
github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 // indirect
github.com/conductorone/dpop/integrations/dpop_oauth2 v0.2.3 // indirect
+ github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/platforms v0.2.1 // indirect
+ github.com/cpuguy83/dockercfg v0.3.2 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/deckarep/golang-set/v2 v2.7.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/docker v28.0.1+incompatible // indirect
+ github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
github.com/dolthub/maphash v0.1.0 // indirect
github.com/doug-martin/goqu/v9 v9.19.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/ebitengine/purego v0.8.2 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/gammazero/deque v1.0.0 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
@@ -55,6 +73,7 @@ require (
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
@@ -70,12 +89,23 @@ require (
github.com/jellydator/ttlcache/v3 v3.3.0 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
- github.com/magiconair/properties v1.8.9 // indirect
+ github.com/magiconair/properties v1.8.10 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/maypok86/otter v1.2.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
+ github.com/moby/patternmatcher v0.6.0 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/sys/user v0.1.0 // indirect
+ github.com/moby/sys/userns v0.1.0 // indirect
+ github.com/moby/term v0.5.0 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/pquerna/cachecontrol v0.2.0 // indirect
github.com/pquerna/xjwt v0.3.0 // indirect
@@ -85,20 +115,22 @@ require (
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/shirou/gopsutil/v3 v3.24.5 // indirect
+ github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
+ github.com/spf13/viper v1.19.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
- go.opentelemetry.io/otel v1.35.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
@@ -110,16 +142,15 @@ require (
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/ratelimit v0.3.1 // indirect
- golang.org/x/crypto v0.34.0 // indirect
+ golang.org/x/crypto v0.37.0 // indirect
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect
- golang.org/x/net v0.35.0 // indirect
+ golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.26.0 // indirect
- golang.org/x/sync v0.11.0 // indirect
- golang.org/x/sys v0.30.0 // indirect
- golang.org/x/text v0.22.0 // indirect
+ golang.org/x/sync v0.13.0 // indirect
+ golang.org/x/sys v0.32.0 // indirect
+ golang.org/x/text v0.24.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250219182151-9fdb1cabc7b2 // indirect
- google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index ce713d2d..91cfa029 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,22 @@
c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805 h1:u2qwJeEvnypw+OCPUHmoZE3IqwfuN5kgDfo5MLzpNM0=
c2sp.org/CCTV/age v0.0.0-20240306222714-3ec4d716e805/go.mod h1:FomMrUJ2Lxt5jCLmZkG3FHa72zUprnhd3v/Z18Snm4w=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/age v1.2.1 h1:X0TZjehAZylOIj4DubWYU1vWQxv9bJpo+Uu2/LGhi1o=
filippo.io/age v1.2.1/go.mod h1:JL9ew2lTN+Pyft4RiNGguFfOpewKwSHm5ayKD/A4004=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI=
github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
@@ -63,18 +71,26 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs=
github.com/cockroachdb/cockroach-go/v2 v2.2.0/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI=
-github.com/conductorone/baton-sdk v0.2.93 h1:zAj0CdZ0T9EcwDD0S26QBOJABp9KR8em4G5LCj3CSbw=
-github.com/conductorone/baton-sdk v0.2.93/go.mod h1:nUgHSAf9P0lfamti5NlOSpeh1t99UNzMjIwf0I7n4/g=
+github.com/conductorone/baton-sdk v0.3.9 h1:D0YiYtRkpRByYsctlREqNG9pb5QAU5DW7sBlccAd3tI=
+github.com/conductorone/baton-sdk v0.3.9/go.mod h1:lWZHgu025Rsgs5jvBrhilGti0zWF2+YfaFY/bWOS/g0=
github.com/conductorone/dpop v0.2.3 h1:s91U3845GHQ6P6FWrdNr2SEOy1ES/jcFs1JtKSl2S+o=
github.com/conductorone/dpop v0.2.3/go.mod h1:gyo8TtzB9SCFCsjsICH4IaLZ7y64CcrDXMOPBwfq/3s=
github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3 h1:kLMCNIh0Mo2vbvvkCmJ3ixsPbXEJ6HPcW53Ku9yje3s=
github.com/conductorone/dpop/integrations/dpop_grpc v0.2.3/go.mod h1:LYNoUc1lkvozk9HBio+xI2w8YyfYy0v2cAJtIgrkj8o=
github.com/conductorone/dpop/integrations/dpop_oauth2 v0.2.3 h1:KhFaxiTzj9FteI9IE2tIGdSjJKyFW5ZcUF2SrgLnA28=
github.com/conductorone/dpop/integrations/dpop_oauth2 v0.2.3/go.mod h1:2eI0qv+XaEhoCw0GKFF1yH4X8Mp4KLVEVnQKRFEy4zs=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
+github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -82,12 +98,22 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/deckarep/golang-set/v2 v2.7.0 h1:gIloKvD7yH2oip4VLhsv3JyLLFnC0Y2mlusgcvJYW5k=
github.com/deckarep/golang-set/v2 v2.7.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
+github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
github.com/doug-martin/goqu/v9 v9.19.0 h1:PD7t1X3tRcUiSdc5TEyOFKujZA5gs3VSA7wxSvBx7qo=
github.com/doug-martin/goqu/v9 v9.19.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
+github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/ennyjfrick/ruleguard-logfatal v0.0.2 h1:FlNMe9+h029VZVD8n6YdFzZAQz/aA8y6WSZttg50yBM=
github.com/ennyjfrick/ruleguard-logfatal v0.0.2/go.mod h1:Ng4Cc8dzYEo8vzB2xd+IOxsO8X1OqO9mNnY4jbngQac=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -96,6 +122,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
@@ -217,6 +245,8 @@ github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgS
github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
+github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
@@ -224,6 +254,8 @@ github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0=
github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
+github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -249,12 +281,13 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
-github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
-github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
+github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -271,10 +304,30 @@ github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/maypok86/otter v1.2.4 h1:HhW1Pq6VdJkmWwcZZq19BlEQkHtI8xgsQzBVXJU0nfc=
github.com/maypok86/otter v1.2.4/go.mod h1:mKLfoI7v1HOmQMwFgX4QkRk23mX6ge3RDvjdHOWG4R4=
+github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI=
+github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
@@ -319,6 +372,8 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
+github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs=
+github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
@@ -329,6 +384,8 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
@@ -346,8 +403,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -361,6 +419,10 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/testcontainers/testcontainers-go v0.37.0 h1:L2Qc0vkTw2EHWQ08djon0D2uw7Z/PtHS/QzZZ5Ra/hg=
+github.com/testcontainers/testcontainers-go v0.37.0/go.mod h1:QPzbxZhQ6Bclip9igjLFj6z0hs01bU8lrl2dHQmgFGM=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0 h1:hsVwFkS6s+79MbKEO+W7A1wNIw1fmkMtF4fg83m6kbc=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.37.0/go.mod h1:Qj/eGbRbO/rEYdcRLmN+bEojzatP/+NS1y8ojl2PQsc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
@@ -377,6 +439,8 @@ go.opentelemetry.io/contrib/bridges/otelzap v0.10.0 h1:ojdSRDvjrnm30beHOmwsSvLpo
go.opentelemetry.io/contrib/bridges/otelzap v0.10.0/go.mod h1:oTTm4g7NEtHSV2i/0FeVdPaPgUIZPfQkFbq0vbzqnv0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A=
@@ -385,6 +449,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glB
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20=
go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y=
@@ -443,8 +509,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
-golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA=
-golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
@@ -476,8 +542,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
-golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
@@ -488,8 +554,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
-golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -507,25 +573,27 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
-golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
-golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -536,8 +604,10 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -600,6 +670,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/pkg/config/conf.gen.go b/pkg/config/conf.gen.go
index 0191d414..d7548dde 100644
--- a/pkg/config/conf.gen.go
+++ b/pkg/config/conf.gen.go
@@ -9,6 +9,7 @@ type Postgresql struct {
IncludeColumns bool `mapstructure:"include-columns"`
IncludeLargeObjects bool `mapstructure:"include-large-objects"`
SyncAllDatabases bool `mapstructure:"sync-all-databases"`
+ SkipBuiltInFunctions bool `mapstructure:"skip-built-in-functions"`
}
func (c* Postgresql) findFieldByTag(tagValue string) (any, bool) {
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 4e01ced5..9da2c19c 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -5,16 +5,17 @@ import (
)
var (
- dsn = field.StringField("dsn", field.WithRequired(true), field.WithDescription("The DSN to connect to the database"))
- schemas = field.StringSliceField("schemas", field.WithDefaultValue([]string{"public"}), field.WithDescription("The schemas to include in the sync"))
- includeColumns = field.BoolField("include-columns", field.WithDescription("Include column privileges when syncing. This can result in large amounts of data"))
- includeLargeObjects = field.BoolField("include-large-objects", field.WithDescription("Include large objects when syncing. This can result in large amounts of data"))
- syncAllDatabases = field.BoolField("sync-all-databases", field.WithDescription("Sync all databases. This can result in large amounts of data"), field.WithDefaultValue(false))
+ dsn = field.StringField("dsn", field.WithRequired(true), field.WithDescription("The DSN to connect to the database"))
+ schemas = field.StringSliceField("schemas", field.WithDefaultValue([]string{"public"}), field.WithDescription("The schemas to include in the sync"))
+ includeColumns = field.BoolField("include-columns", field.WithDescription("Include column privileges when syncing. This can result in large amounts of data"))
+ includeLargeObjects = field.BoolField("include-large-objects", field.WithDescription("Include large objects when syncing. This can result in large amounts of data"))
+ syncAllDatabases = field.BoolField("sync-all-databases", field.WithDescription("Sync all databases. This can result in large amounts of data"), field.WithDefaultValue(false))
+ skipBuiltInFunctions = field.BoolField("skip-built-in-functions", field.WithDescription("Skip Postgres built-in functions"), field.WithDefaultValue(false))
)
var relationships = []field.SchemaFieldRelationship{}
//go:generate go run ./gen
var Config = field.NewConfiguration([]field.SchemaField{
- dsn, schemas, includeColumns, includeLargeObjects, syncAllDatabases,
+ dsn, schemas, includeColumns, includeLargeObjects, syncAllDatabases, skipBuiltInFunctions,
}, relationships...)
diff --git a/pkg/connector/connector.go b/pkg/connector/connector.go
index 07663428..cf4e4dda 100644
--- a/pkg/connector/connector.go
+++ b/pkg/connector/connector.go
@@ -12,11 +12,12 @@ import (
)
type Postgresql struct {
- clientPool *postgres.ClientDatabasesPool
- schemas []string
- includeColumns bool
- includeLargeObjects bool
- syncAllDatabases bool
+ clientPool *postgres.ClientDatabasesPool
+ schemas []string
+ includeColumns bool
+ includeLargeObjects bool
+ syncAllDatabases bool
+ skipBuiltInFunctions bool
}
func (o *Postgresql) ResourceSyncers(ctx context.Context) []connectorbuilder.ResourceSyncer {
@@ -26,7 +27,7 @@ func (o *Postgresql) ResourceSyncers(ctx context.Context) []connectorbuilder.Res
newTableSyncer(ctx, o.clientPool, o.includeColumns),
newViewSyncer(ctx, o.clientPool),
newColumnSyncer(ctx, o.clientPool),
- newFunctionSyncer(ctx, o.clientPool),
+ newFunctionSyncer(ctx, o.clientPool, o.skipBuiltInFunctions),
newProcedureSyncer(ctx, o.clientPool),
newLargeObjectSyncer(ctx, o.clientPool.Default(ctx), o.includeLargeObjects),
newDatabaseSyncer(ctx, o.clientPool, o.syncAllDatabases),
@@ -61,17 +62,26 @@ func (c *Postgresql) Asset(ctx context.Context, asset *v2.AssetRef) (string, io.
return "", nil, fmt.Errorf("not implemented")
}
-func New(ctx context.Context, dsn string, schemas []string, includeColumns bool, includeLargeObjects bool, syncAllDatabases bool) (*Postgresql, error) {
+func New(
+ ctx context.Context,
+ dsn string,
+ schemas []string,
+ includeColumns bool,
+ includeLargeObjects bool,
+ syncAllDatabases bool,
+ skipBuiltInFunctions bool,
+) (*Postgresql, error) {
clientPool, err := postgres.NewClientDatabasesPool(ctx, dsn, postgres.WithSchemaFilter(schemas))
if err != nil {
return nil, fmt.Errorf("failed to create postgres client pool: %w", err)
}
return &Postgresql{
- clientPool: clientPool,
- schemas: schemas,
- includeColumns: includeColumns,
- includeLargeObjects: includeLargeObjects,
- syncAllDatabases: syncAllDatabases,
+ clientPool: clientPool,
+ schemas: schemas,
+ includeColumns: includeColumns,
+ includeLargeObjects: includeLargeObjects,
+ syncAllDatabases: syncAllDatabases,
+ skipBuiltInFunctions: skipBuiltInFunctions,
}, nil
}
diff --git a/pkg/connector/connector_test.go b/pkg/connector/connector_test.go
new file mode 100644
index 00000000..c5f4cb04
--- /dev/null
+++ b/pkg/connector/connector_test.go
@@ -0,0 +1,186 @@
+package connector
+
+import (
+ "context"
+ "net"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+ connectorV2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/conductorone/baton-sdk/pkg/connectorbuilder"
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+ "github.com/conductorone/baton-sdk/pkg/dotc1z/manager"
+ "github.com/conductorone/baton-sdk/pkg/dotc1z/manager/local"
+ "github.com/conductorone/baton-sdk/pkg/sync"
+ "github.com/conductorone/baton-sdk/pkg/ugrpc"
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/test/bufconn"
+)
+
+const bufSize = 1024 * 1024
+
+type inMemoryConnectorClient struct {
+ connectorV2.ResourceTypesServiceClient
+ connectorV2.ResourcesServiceClient
+ connectorV2.ResourceGetterServiceClient
+ connectorV2.EntitlementsServiceClient
+ connectorV2.GrantsServiceClient
+ connectorV2.ConnectorServiceClient
+ connectorV2.AssetServiceClient
+ connectorV2.GrantManagerServiceClient
+ connectorV2.ResourceManagerServiceClient
+ connectorV2.ResourceDeleterServiceClient
+ connectorV2.AccountManagerServiceClient
+ connectorV2.CredentialManagerServiceClient
+ connectorV2.EventServiceClient
+ connectorV2.TicketsServiceClient
+ connectorV2.ActionServiceClient
+}
+
+func newTestConnector(t *testing.T) (context.Context, sync.Syncer, manager.Manager, *inMemoryConnectorClient) {
+ ctx := context.Background()
+
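+ // Start a throwaway Postgres instance via testcontainers and build the connector against it (all boolean options enabled, including skip-built-in-functions).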
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ postgresConnector, err := New(
+ ctx,
+ container.Dsn(),
+ nil,
+ true,
+ true,
+ true,
+ true,
+ )
+ require.NoError(t, err)
+
+ srv, err := connectorbuilder.NewConnector(ctx, postgresConnector)
+ require.NoError(t, err)
+
+ tempPath, err := os.CreateTemp("", "baton-postgresql-test-c1z")
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ err := os.Remove(tempPath.Name())
+ require.NoError(t, err)
+ })
+
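+ // Serve the connector over an in-memory gRPC listener (bufconn) so the test exercises the real service stack without opening a network port.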
+ lis := bufconn.Listen(bufSize)
+ s := grpc.NewServer(
+ grpc.Creds(insecure.NewCredentials()),
+ grpc.ChainUnaryInterceptor(ugrpc.UnaryServerInterceptor(ctx)...),
+ grpc.ChainStreamInterceptor(ugrpc.StreamServerInterceptors(ctx)...),
+ grpc.StatsHandler(
+ otelgrpc.NewServerHandler(
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ ),
+ ),
+ ),
+ ),
+ )
+
+ connectorV2.RegisterConnectorServiceServer(s, srv)
+ connectorV2.RegisterGrantsServiceServer(s, srv)
+ connectorV2.RegisterEntitlementsServiceServer(s, srv)
+ connectorV2.RegisterResourcesServiceServer(s, srv)
+ connectorV2.RegisterResourceTypesServiceServer(s, srv)
+ connectorV2.RegisterAssetServiceServer(s, srv)
+ connectorV2.RegisterEventServiceServer(s, srv)
+ connectorV2.RegisterResourceGetterServiceServer(s, srv)
+ connectorV2.RegisterGrantManagerServiceServer(s, srv)
+ connectorV2.RegisterResourceManagerServiceServer(s, srv)
+ connectorV2.RegisterResourceDeleterServiceServer(s, srv)
+ connectorV2.RegisterAccountManagerServiceServer(s, srv)
+ connectorV2.RegisterCredentialManagerServiceServer(s, srv)
+
+ go func() {
+ if err := s.Serve(lis); err != nil {
+ t.Error("failed to serve:", err)
+ return
+ }
+ }()
+
+ t.Cleanup(func() {
+ s.Stop()
+ })
+
+ bufDialer := func(ctx context.Context, s string) (net.Conn, error) {
+ return lis.DialContext(ctx)
+ }
+
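+ // Dial the in-memory listener and wrap the connection in the typed service clients the syncer expects.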
+ cc, err := grpc.NewClient(
+ "passthrough://bufnet",
+ grpc.WithContextDialer(bufDialer),
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ )
+ require.NoError(t, err)
+
+ client := &inMemoryConnectorClient{
+ ResourceTypesServiceClient: connectorV2.NewResourceTypesServiceClient(cc),
+ ResourcesServiceClient: connectorV2.NewResourcesServiceClient(cc),
+ EntitlementsServiceClient: connectorV2.NewEntitlementsServiceClient(cc),
+ GrantsServiceClient: connectorV2.NewGrantsServiceClient(cc),
+ ConnectorServiceClient: connectorV2.NewConnectorServiceClient(cc),
+ AssetServiceClient: connectorV2.NewAssetServiceClient(cc),
+ GrantManagerServiceClient: connectorV2.NewGrantManagerServiceClient(cc),
+ ResourceManagerServiceClient: connectorV2.NewResourceManagerServiceClient(cc),
+ ResourceDeleterServiceClient: connectorV2.NewResourceDeleterServiceClient(cc),
+ AccountManagerServiceClient: connectorV2.NewAccountManagerServiceClient(cc),
+ CredentialManagerServiceClient: connectorV2.NewCredentialManagerServiceClient(cc),
+ EventServiceClient: connectorV2.NewEventServiceClient(cc),
+ TicketsServiceClient: connectorV2.NewTicketsServiceClient(cc),
+ ActionServiceClient: connectorV2.NewActionServiceClient(cc),
+ ResourceGetterServiceClient: connectorV2.NewResourceGetterServiceClient(cc),
+ }
+
+ _, err = client.Validate(ctx, &connectorV2.ConnectorServiceValidateRequest{})
+ require.NoError(t, err)
+
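+ // Build a syncer that writes its output to the temporary c1z file created above.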
+ syncer, err := sync.NewSyncer(
+ ctx,
+ client,
+ sync.WithC1ZPath(tempPath.Name()),
+ )
+ require.NoError(t, err)
+
+ localManager, err := local.New(ctx, tempPath.Name())
+ require.NoError(t, err)
+
+ return ctx, syncer, localManager, client
+}
+
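+// getByDisplayName lists the synced resources of the given type from the c1z file and
+// returns the first one whose display name matches, or nil if none is found.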
+func getByDisplayName(ctx context.Context, c1z *dotc1z.C1File, resourceType *connectorV2.ResourceType, name string) (*connectorV2.Resource, error) {
+ resources, err := c1z.ListResources(ctx, &connectorV2.ResourcesServiceListResourcesRequest{
+ ResourceTypeId: resourceType.Id,
+ PageSize: 100,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rs := range resources.List {
+ if rs.DisplayName == name {
+ return rs, nil
+ }
+ }
+
+ return nil, nil
+}
+
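+// TestConnectorFullSync verifies that a full sync against the test database completes without error.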
+func TestConnectorFullSync(t *testing.T) {
+ ctx, syncer, _, _ := newTestConnector(t)
+
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+}
diff --git a/pkg/connector/function.go b/pkg/connector/function.go
index 787f1c1e..723f7761 100644
--- a/pkg/connector/function.go
+++ b/pkg/connector/function.go
@@ -18,8 +18,9 @@ var functionResourceType = &v2.ResourceType{
}
type functionSyncer struct {
- resourceType *v2.ResourceType
- clientPool *postgres.ClientDatabasesPool
+ resourceType *v2.ResourceType
+ clientPool *postgres.ClientDatabasesPool
+ skipBuiltInFunctions bool
}
func (r *functionSyncer) ResourceType(ctx context.Context) *v2.ResourceType {
@@ -47,7 +48,7 @@ func (r *functionSyncer) List(ctx context.Context, parentResourceID *v2.Resource
return nil, "", nil, err
}
- functions, nextPageToken, err := client.ListFunctions(ctx, parentID, &postgres.Pager{Token: pToken.Token, Size: pToken.Size})
+ functions, nextPageToken, err := client.ListFunctions(ctx, parentID, r.skipBuiltInFunctions, &postgres.Pager{Token: pToken.Token, Size: pToken.Size})
if err != nil {
return nil, "", nil, err
}
@@ -57,7 +58,7 @@ func (r *functionSyncer) List(ctx context.Context, parentResourceID *v2.Resource
var annos annotations.Annotations
ret = append(ret, &v2.Resource{
- DisplayName: o.Name,
+ DisplayName: o.Signature(),
Id: &v2.ResourceId{
ResourceType: r.resourceType.Id,
Resource: formatWithDatabaseID(functionResourceType.Id, db, o.ID),
@@ -125,9 +126,80 @@ func (r *functionSyncer) Grants(ctx context.Context, resource *v2.Resource, pTok
return ret, nextPageToken, nil, nil
}
-func newFunctionSyncer(ctx context.Context, c *postgres.ClientDatabasesPool) *functionSyncer {
+func (r *functionSyncer) Grant(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, nil, fmt.Errorf("baton-postgres: only users and roles can be granted function privileges")
+ }
+
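+ // Decode the privilege name, grant-option flag, and database-scoped resource ID from the entitlement.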
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, nil, err
+ }
+
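+ // Fetch the function referenced by the entitlement before issuing the GRANT.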
+ function, err := dbClient.GetFunction(ctx, rID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = dbClient.GrantFunction(ctx, function.Schema, function, principal.DisplayName, privilegeName, isGrant)
+ if err != nil {
+ return nil, nil, err
+ }
+ return []*v2.Grant{
+ {
+ Id: fmt.Sprintf("%s:%s:%s", entitlement.Id, principal.Id.ResourceType, principal.Id.Resource),
+ Entitlement: entitlement,
+ Principal: principal,
+ },
+ }, nil, nil
+}
+
+func (r *functionSyncer) Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) {
+ entitlement := grant.Entitlement
+ principal := grant.Principal
+
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, fmt.Errorf("baton-postgres: only users and roles can have function privileges revoked")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, err
+ }
+
+ function, err := dbClient.GetFunction(ctx, rID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = dbClient.RevokeFunction(ctx, function.Schema, function, principal.DisplayName, privilegeName, isGrant)
+ return nil, err
+}
+
+func newFunctionSyncer(ctx context.Context, c *postgres.ClientDatabasesPool, skipBuiltInFunctions bool) *functionSyncer {
return &functionSyncer{
- resourceType: functionResourceType,
- clientPool: c,
+ resourceType: functionResourceType,
+ clientPool: c,
+ skipBuiltInFunctions: skipBuiltInFunctions,
}
}
diff --git a/pkg/connector/function_test.go b/pkg/connector/function_test.go
new file mode 100644
index 00000000..77dba2be
--- /dev/null
+++ b/pkg/connector/function_test.go
@@ -0,0 +1,70 @@
+package connector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+
+ connectorv2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGrantRevokeFunction(t *testing.T) {
+ ctx, syncer, manager, client := newTestConnector(t)
+
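+ // Run a full sync first so the c1z file contains the database, role, and function resources used below.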
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+
+ c1z, err := manager.LoadC1Z(ctx)
+ require.NoError(t, err)
+ defer func(c1z *dotc1z.C1File) {
+ err := c1z.Close()
+ require.NoError(t, err)
+ }(c1z)
+
+ dbResource, err := getByDisplayName(ctx, c1z, databaseResourceType, "postgres")
+ require.NoError(t, err)
+ require.NotNil(t, dbResource)
+
+ roleResource, err := getByDisplayName(ctx, c1z, roleResourceType, "test_role")
+ require.NoError(t, err)
+ require.NotNil(t, roleResource)
+
+ functionResource, err := getByDisplayName(ctx, c1z, functionResourceType, "get_test_item_count()")
+ require.NoError(t, err)
+ require.NotNil(t, functionResource)
+
+ dbId, rId, err := parseWithDatabaseID(functionResource.Id.Resource)
+ require.NoError(t, err)
+
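+ // Grant EXECUTE on the function to test_role, then revoke it using the grant returned by the connector.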
+ grantResponse, err := client.Grant(ctx, &connectorv2.GrantManagerServiceGrantRequest{
+ Principal: &connectorv2.Resource{
+ Id: roleResource.Id,
+ DisplayName: roleResource.DisplayName,
+ },
+ Entitlement: &connectorv2.Entitlement{
+ Id: fmt.Sprintf("entitlement:function:db%s:%d:execute", dbId, rId),
+ Resource: &connectorv2.Resource{
+ Id: &connectorv2.ResourceId{
+ ResourceType: functionResourceType.Id,
+ Resource: fmt.Sprintf("function:db%s:%d", dbId, rId),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, grantResponse)
+ require.Len(t, grantResponse.Grants, 1)
+
+ grant := grantResponse.Grants[0]
+
+ revokeResponse, err := client.Revoke(ctx, &connectorv2.GrantManagerServiceRevokeRequest{
+ Grant: grant,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, revokeResponse)
+}
diff --git a/pkg/connector/helpers.go b/pkg/connector/helpers.go
index a7ddd665..9a964123 100644
--- a/pkg/connector/helpers.go
+++ b/pkg/connector/helpers.go
@@ -88,18 +88,26 @@ func formatEntitlementID(resource *v2.Resource, privName string, grant bool) str
}
}
+// parseEntitlementID parses an entitlement ID and returns the resource type ID,
+// the resource ID, the privilege name, an isGrant flag, and an error if any.
func parseEntitlementID(id string) (string, string, string, bool, error) {
- parts := strings.SplitN(id, ":", 5)
+ parts := strings.SplitN(id, ":", 6)
- if len(parts) == 4 {
- return parts[1], parts[2], parts[3], false, nil
+ if len(parts) <= 2 {
+ return "", "", "", false, fmt.Errorf("invalid entitlement ID: %s", id)
}
- if len(parts) == 5 && parts[4] == "grant" {
- return parts[1], parts[2], parts[3], true, nil
+ isGrant := false
+
+ if parts[len(parts)-1] == "grant" {
+ isGrant = true
+ }
+
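+ // Database-scoped IDs look like "entitlement:<type>:db<dbID>:<resourceID>:<privilege>[:grant]"; rejoin the db segment with the resource ID.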
+ if strings.HasPrefix(parts[2], "db") {
+ return parts[1], fmt.Sprintf("%s:%s", parts[2], parts[3]), parts[4], isGrant, nil
}
- return "", "", "", false, fmt.Errorf("invalid entilement ID %s %d", id, len(parts))
+ return parts[1], parts[2], parts[3], isGrant, nil
}
func grantsForPrivilegeSet(
diff --git a/pkg/connector/procedure.go b/pkg/connector/procedure.go
index dd8675c0..bee7f90b 100644
--- a/pkg/connector/procedure.go
+++ b/pkg/connector/procedure.go
@@ -57,7 +57,7 @@ func (r *procedureSyncer) List(ctx context.Context, parentResourceID *v2.Resourc
var annos annotations.Annotations
ret = append(ret, &v2.Resource{
- DisplayName: o.Name,
+ DisplayName: o.Signature(),
Id: &v2.ResourceId{
ResourceType: r.resourceType.Id,
Resource: formatWithDatabaseID(procedureResourceType.Id, db, o.ID),
@@ -108,6 +108,76 @@ func (r *procedureSyncer) Grants(ctx context.Context, resource *v2.Resource, pTo
return ret, nextPageToken, nil, nil
}
+func (r *procedureSyncer) Grant(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, nil, fmt.Errorf("baton-postgres: only users and roles can be granted procedure privileges")
+ }
+
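+ // Resolve the privilege, grant-option flag, and target database from the entitlement, mirroring functionSyncer.Grant.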
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ procedure, err := dbClient.GetProcedure(ctx, rID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = dbClient.GrantProcedure(ctx, procedure.Schema, procedure, principal.DisplayName, privilegeName, isGrant)
+ if err != nil {
+ return nil, nil, err
+ }
+ return []*v2.Grant{
+ {
+ Id: fmt.Sprintf("%s:%s:%s", entitlement.Id, principal.Id.ResourceType, principal.Id.Resource),
+ Entitlement: entitlement,
+ Principal: principal,
+ },
+ }, nil, nil
+}
+
+func (r *procedureSyncer) Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) {
+ entitlement := grant.Entitlement
+ principal := grant.Principal
+
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, fmt.Errorf("baton-postgres: only users and roles can have procedure privileges revoked")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, err
+ }
+
+ procedure, err := dbClient.GetProcedure(ctx, rID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = dbClient.RevokeProcedure(ctx, procedure.Schema, procedure, principal.DisplayName, privilegeName, isGrant)
+ return nil, err
+}
+
func newProcedureSyncer(ctx context.Context, c *postgres.ClientDatabasesPool) *procedureSyncer {
return &procedureSyncer{
resourceType: procedureResourceType,
diff --git a/pkg/connector/procedure_test.go b/pkg/connector/procedure_test.go
new file mode 100644
index 00000000..17b1d245
--- /dev/null
+++ b/pkg/connector/procedure_test.go
@@ -0,0 +1,70 @@
+package connector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+
+ connectorv2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGrantRevokeProcedure(t *testing.T) {
+ ctx, syncer, manager, client := newTestConnector(t)
+
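+ // Run a full sync first so the c1z file contains the database, role, and procedure resources used below.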
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+
+ c1z, err := manager.LoadC1Z(ctx)
+ require.NoError(t, err)
+ defer func(c1z *dotc1z.C1File) {
+ err := c1z.Close()
+ require.NoError(t, err)
+ }(c1z)
+
+ dbResource, err := getByDisplayName(ctx, c1z, databaseResourceType, "postgres")
+ require.NoError(t, err)
+ require.NotNil(t, dbResource)
+
+ roleResource, err := getByDisplayName(ctx, c1z, roleResourceType, "test_role")
+ require.NoError(t, err)
+ require.NotNil(t, roleResource)
+
+ procedureResource, err := getByDisplayName(ctx, c1z, procedureResourceType, "add_test_item(IN item_name character varying)")
+ require.NoError(t, err)
+ require.NotNil(t, procedureResource)
+
+ dbId, rId, err := parseWithDatabaseID(procedureResource.Id.Resource)
+ require.NoError(t, err)
+
+ grantResponse, err := client.Grant(ctx, &connectorv2.GrantManagerServiceGrantRequest{
+ Principal: &connectorv2.Resource{
+ Id: roleResource.Id,
+ DisplayName: roleResource.DisplayName,
+ },
+ Entitlement: &connectorv2.Entitlement{
+ Id: fmt.Sprintf("entitlement:procedure:db%s:%d:execute", dbId, rId),
+ Resource: &connectorv2.Resource{
+ Id: &connectorv2.ResourceId{
+ ResourceType: procedureResourceType.Id,
+ Resource: fmt.Sprintf("procedure:db%s:%d", dbId, rId),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, grantResponse)
+ require.Len(t, grantResponse.Grants, 1)
+
+ grant := grantResponse.Grants[0]
+
+ revokeResponse, err := client.Revoke(ctx, &connectorv2.GrantManagerServiceRevokeRequest{
+ Grant: grant,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, revokeResponse)
+}
diff --git a/pkg/connector/sequence.go b/pkg/connector/sequence.go
index a79e3851..77b56e56 100644
--- a/pkg/connector/sequence.go
+++ b/pkg/connector/sequence.go
@@ -129,6 +129,77 @@ func (r *sequenceSyncer) Grants(ctx context.Context, resource *v2.Resource, pTok
return ret, nextPageToken, nil, nil
}
+func (r *sequenceSyncer) Grant(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, nil, fmt.Errorf("baton-postgres: only users and roles can have sequence granted")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sequence, err := dbClient.GetSequence(ctx, rID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = dbClient.GrantSequence(ctx, sequence.Schema, sequence.Name, principal.DisplayName, privilegeName, isGrant)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return []*v2.Grant{
+ {
+ Id: fmt.Sprintf("%s:%s:%s", entitlement.Id, principal.Id.ResourceType, principal.Id.Resource),
+ Entitlement: entitlement,
+ Principal: principal,
+ },
+ }, nil, nil
+}
+
+func (r *sequenceSyncer) Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) {
+ entitlement := grant.Entitlement
+ principal := grant.Principal
+
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, fmt.Errorf("baton-postgres: only users and roles can have sequence revoked")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, err
+ }
+
+ sequence, err := dbClient.GetSequence(ctx, rID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = dbClient.RevokeSequence(ctx, sequence.Schema, sequence.Name, principal.DisplayName, privilegeName, isGrant)
+ return nil, err
+}
+
func newSequenceSyncer(ctx context.Context, c *postgres.ClientDatabasesPool) *sequenceSyncer {
return &sequenceSyncer{
resourceType: sequenceResourceType,
diff --git a/pkg/connector/sequence_test.go b/pkg/connector/sequence_test.go
new file mode 100644
index 00000000..b7aedddd
--- /dev/null
+++ b/pkg/connector/sequence_test.go
@@ -0,0 +1,69 @@
+package connector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+
+ connectorv2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGrantRevokeSequence(t *testing.T) {
+ ctx, syncer, manager, client := newTestConnector(t)
+
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+
+ c1z, err := manager.LoadC1Z(ctx)
+ require.NoError(t, err)
+ defer func(c1z *dotc1z.C1File) {
+ err := c1z.Close()
+ require.NoError(t, err)
+ }(c1z)
+
+ dbResource, err := getByDisplayName(ctx, c1z, databaseResourceType, "postgres")
+ require.NoError(t, err)
+ require.NotNil(t, dbResource)
+
+ roleResource, err := getByDisplayName(ctx, c1z, roleResourceType, "test_role")
+ require.NoError(t, err)
+ require.NotNil(t, roleResource)
+
+ sequenceResource, err := getByDisplayName(ctx, c1z, sequenceResourceType, "test_table_seq")
+ require.NoError(t, err)
+ require.NotNil(t, sequenceResource)
+
+ dbId, rId, err := parseWithDatabaseID(sequenceResource.Id.Resource)
+ require.NoError(t, err)
+
+ grantResponse, err := client.Grant(ctx, &connectorv2.GrantManagerServiceGrantRequest{
+ Principal: &connectorv2.Resource{
+ Id: roleResource.Id,
+ DisplayName: roleResource.DisplayName,
+ },
+ Entitlement: &connectorv2.Entitlement{
+ Id: fmt.Sprintf("entitlement:sequence:db%s:%d:select", dbId, rId),
+ Resource: &connectorv2.Resource{
+ Id: &connectorv2.ResourceId{
+ ResourceType: sequenceResourceType.Id,
+ Resource: fmt.Sprintf("sequence:db%s:%d", dbId, rId),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, grantResponse)
+ require.Len(t, grantResponse.Grants, 1)
+
+ grant := grantResponse.Grants[0]
+
+ revokeResponse, err := client.Revoke(ctx, &connectorv2.GrantManagerServiceRevokeRequest{
+ Grant: grant,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, revokeResponse)
+}
diff --git a/pkg/connector/table.go b/pkg/connector/table.go
index 368727bf..0719adb8 100644
--- a/pkg/connector/table.go
+++ b/pkg/connector/table.go
@@ -139,6 +139,77 @@ func (r *tableSyncer) Grants(ctx context.Context, resource *v2.Resource, pToken
return ret, nextPageToken, nil, nil
}
+func (r *tableSyncer) Grant(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, nil, fmt.Errorf("baton-postgres: only users and roles can have table granted")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ table, err := dbClient.GetTable(ctx, rID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = dbClient.GrantTable(ctx, table.Schema, table.Name, principal.DisplayName, privilegeName, isGrant)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return []*v2.Grant{
+ {
+ Id: fmt.Sprintf("%s:%s:%s", entitlement.Id, principal.Id.ResourceType, principal.Id.Resource),
+ Entitlement: entitlement,
+ Principal: principal,
+ },
+ }, nil, nil
+}
+
+func (r *tableSyncer) Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) {
+ entitlement := grant.Entitlement
+ principal := grant.Principal
+
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, fmt.Errorf("baton-postgres: only users and roles can have table revoked")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, err
+ }
+
+ table, err := dbClient.GetTable(ctx, rID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = dbClient.RevokeTable(ctx, table.Schema, table.Name, principal.DisplayName, privilegeName, isGrant)
+ return nil, err
+}
+
func newTableSyncer(ctx context.Context, c *postgres.ClientDatabasesPool, includeColumns bool) *tableSyncer {
return &tableSyncer{
resourceType: tableResourceType,
diff --git a/pkg/connector/table_test.go b/pkg/connector/table_test.go
new file mode 100644
index 00000000..592900cf
--- /dev/null
+++ b/pkg/connector/table_test.go
@@ -0,0 +1,69 @@
+package connector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+
+ connectorv2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGrantRevokeTable(t *testing.T) {
+ ctx, syncer, manager, client := newTestConnector(t)
+
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+
+ c1z, err := manager.LoadC1Z(ctx)
+ require.NoError(t, err)
+ defer func(c1z *dotc1z.C1File) {
+ err := c1z.Close()
+ require.NoError(t, err)
+ }(c1z)
+
+ dbResource, err := getByDisplayName(ctx, c1z, databaseResourceType, "postgres")
+ require.NoError(t, err)
+ require.NotNil(t, dbResource)
+
+ roleResource, err := getByDisplayName(ctx, c1z, roleResourceType, "test_role")
+ require.NoError(t, err)
+ require.NotNil(t, roleResource)
+
+ tableResource, err := getByDisplayName(ctx, c1z, tableResourceType, "test_table")
+ require.NoError(t, err)
+ require.NotNil(t, tableResource)
+
+ dbId, rId, err := parseWithDatabaseID(tableResource.Id.Resource)
+ require.NoError(t, err)
+
+ grantResponse, err := client.Grant(ctx, &connectorv2.GrantManagerServiceGrantRequest{
+ Principal: &connectorv2.Resource{
+ Id: roleResource.Id,
+ DisplayName: roleResource.DisplayName,
+ },
+ Entitlement: &connectorv2.Entitlement{
+ Id: fmt.Sprintf("entitlement:table:db%s:%d:select", dbId, rId),
+ Resource: &connectorv2.Resource{
+ Id: &connectorv2.ResourceId{
+ ResourceType: tableResourceType.Id,
+ Resource: fmt.Sprintf("table:db%s:%d", dbId, rId),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, grantResponse)
+ require.Len(t, grantResponse.Grants, 1)
+
+ grant := grantResponse.Grants[0]
+
+ revokeResponse, err := client.Revoke(ctx, &connectorv2.GrantManagerServiceRevokeRequest{
+ Grant: grant,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, revokeResponse)
+}
diff --git a/pkg/connector/view.go b/pkg/connector/view.go
index 16087e9a..9fcb8936 100644
--- a/pkg/connector/view.go
+++ b/pkg/connector/view.go
@@ -129,6 +129,77 @@ func (r *viewSyncer) Grants(ctx context.Context, resource *v2.Resource, pToken *
return ret, nextPageToken, nil, nil
}
+func (r *viewSyncer) Grant(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, nil, fmt.Errorf("baton-postgres: only users and roles can have view granted")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ view, err := dbClient.GetView(ctx, rID)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = dbClient.GrantView(ctx, view.Schema, view.Name, principal.DisplayName, privilegeName, isGrant)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return []*v2.Grant{
+ {
+ Id: fmt.Sprintf("%s:%s:%s", entitlement.Id, principal.Id.ResourceType, principal.Id.Resource),
+ Entitlement: entitlement,
+ Principal: principal,
+ },
+ }, nil, nil
+}
+
+func (r *viewSyncer) Revoke(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error) {
+ entitlement := grant.Entitlement
+ principal := grant.Principal
+
+ if principal.Id.ResourceType != roleResourceType.Id {
+ return nil, fmt.Errorf("baton-postgres: only users and roles can have view revoked")
+ }
+
+ _, _, privilegeName, isGrant, err := parseEntitlementID(entitlement.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ dbId, rID, err := parseWithDatabaseID(entitlement.Resource.Id.Resource)
+ if err != nil {
+ return nil, err
+ }
+
+ dbClient, _, err := r.clientPool.Get(ctx, dbId)
+ if err != nil {
+ return nil, err
+ }
+
+ view, err := dbClient.GetView(ctx, rID)
+ if err != nil {
+ return nil, err
+ }
+
+ err = dbClient.RevokeView(ctx, view.Schema, view.Name, principal.DisplayName, privilegeName, isGrant)
+ return nil, err
+}
+
func newViewSyncer(ctx context.Context, c *postgres.ClientDatabasesPool) *viewSyncer {
return &viewSyncer{
resourceType: viewResourceType,
diff --git a/pkg/connector/view_test.go b/pkg/connector/view_test.go
new file mode 100644
index 00000000..376b56c5
--- /dev/null
+++ b/pkg/connector/view_test.go
@@ -0,0 +1,69 @@
+package connector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+
+ connectorv2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGrantRevokeView(t *testing.T) {
+ ctx, syncer, manager, client := newTestConnector(t)
+
+ err := syncer.Sync(ctx)
+ require.NoError(t, err)
+ err = syncer.Close(ctx)
+ require.NoError(t, err)
+
+ c1z, err := manager.LoadC1Z(ctx)
+ require.NoError(t, err)
+ defer func(c1z *dotc1z.C1File) {
+ err := c1z.Close()
+ require.NoError(t, err)
+ }(c1z)
+
+ dbResource, err := getByDisplayName(ctx, c1z, databaseResourceType, "postgres")
+ require.NoError(t, err)
+ require.NotNil(t, dbResource)
+
+ roleResource, err := getByDisplayName(ctx, c1z, roleResourceType, "test_role")
+ require.NoError(t, err)
+ require.NotNil(t, roleResource)
+
+ viewResource, err := getByDisplayName(ctx, c1z, viewResourceType, "test_table_view")
+ require.NoError(t, err)
+ require.NotNil(t, viewResource)
+
+ dbId, rId, err := parseWithDatabaseID(viewResource.Id.Resource)
+ require.NoError(t, err)
+
+ grantResponse, err := client.Grant(ctx, &connectorv2.GrantManagerServiceGrantRequest{
+ Principal: &connectorv2.Resource{
+ Id: roleResource.Id,
+ DisplayName: roleResource.DisplayName,
+ },
+ Entitlement: &connectorv2.Entitlement{
+ Id: fmt.Sprintf("entitlement:view:db%s:%d:select:grant", dbId, rId),
+ Resource: &connectorv2.Resource{
+ Id: &connectorv2.ResourceId{
+ ResourceType: viewResourceType.Id,
+ Resource: fmt.Sprintf("view:db%s:%d", dbId, rId),
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, grantResponse)
+ require.Len(t, grantResponse.Grants, 1)
+
+ grant := grantResponse.Grants[0]
+
+ revokeResponse, err := client.Revoke(ctx, &connectorv2.GrantManagerServiceRevokeRequest{
+ Grant: grant,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, revokeResponse)
+}
diff --git a/pkg/postgres/client.go b/pkg/postgres/client.go
index 5f2bb670..273cdddb 100644
--- a/pkg/postgres/client.go
+++ b/pkg/postgres/client.go
@@ -9,6 +9,8 @@ import (
"go.uber.org/zap"
)
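+// withGrantOptions is the suffix appended to GRANT statements that should also allow the grantee to grant the privilege to others.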
+const withGrantOptions = " WITH GRANT OPTION"
+
type ClientDatabasesPool struct {
databases map[string]*Client
opts []ClientOpt
diff --git a/pkg/postgres/databases.go b/pkg/postgres/databases.go
index 60bf3177..998bcee8 100644
--- a/pkg/postgres/databases.go
+++ b/pkg/postgres/databases.go
@@ -167,6 +167,16 @@ func transformPrivilege(privilege string) string {
return strings.ReplaceAll(privilege, "-", "")
}
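+// sanitizePrivilege runs a privilege keyword through pgx.Identifier sanitization and then strips the wrapping quotes again
+// when nothing inside needed escaping, so plain keywords such as SELECT end up unquoted in the generated statement.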
+func sanitizePrivilege(privilege string) string {
+ temp := pgx.Identifier{transformPrivilege(privilege)}.Sanitize()
+
+ if strings.Count(temp, "\"") == 2 {
+ return strings.ReplaceAll(temp, "\"", "")
+ }
+
+ return temp
+}
+
func (c *Client) GrantDatabase(ctx context.Context, dbName string, principalName string, privilege string, isGrant bool) error {
l := ctxzap.Extract(ctx)
l.Debug("granting database", zap.String("dbName", dbName), zap.String("principalName", principalName), zap.String("privilege", privilege))
diff --git a/pkg/postgres/functions.go b/pkg/postgres/functions.go
index c6683a3f..de15e92a 100644
--- a/pkg/postgres/functions.go
+++ b/pkg/postgres/functions.go
@@ -4,20 +4,25 @@ import (
"context"
"database/sql"
"errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
+
"github.com/georgysavva/scany/pgxscan"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
"go.uber.org/zap"
)
type FunctionModel struct {
- ID int64 `db:"oid"`
- Name string `db:"proname"`
- Schema string `db:"nspname"`
- OwnerID int64 `db:"proowner"`
- ACLs []string `db:"proacl"`
+ ID int64 `db:"oid"`
+ Name string `db:"proname"`
+ Schema string `db:"nspname"`
+ OwnerID int64 `db:"proowner"`
+ ACLs []string `db:"proacl"`
+ Arguments string `db:"arguments"`
+ ReturnType string `db:"return_type"`
}
func (t *FunctionModel) GetOwnerID() int64 {
@@ -36,14 +41,21 @@ func (t *FunctionModel) DefaultPrivileges() PrivilegeSet {
return Execute
}
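+// Signature returns the function name together with its argument list (as reported by pg_get_function_arguments),
+// which is needed to address overloaded functions in GRANT and REVOKE statements.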
+func (t *FunctionModel) Signature() string {
+ return fmt.Sprintf("%s(%s)", t.Name, t.Arguments)
+}
+
func (c *Client) GetFunction(ctx context.Context, functionID int64) (*FunctionModel, error) {
ret := &FunctionModel{}
q := `
SELECT DISTINCT
- a."oid"::int, a."proname",
+ a."oid"::int,
+ a."proname",
n."nspname",
- a."proowner"::int, a."proacl"
+ a."proowner"::int, a."proacl",
+ pg_get_function_arguments(a.oid) AS arguments,
+ pg_get_function_result(a.oid) AS return_type
FROM "pg_catalog"."pg_proc" a
LEFT JOIN pg_namespace n ON n."oid" = a."pronamespace"
WHERE a."oid" = $1
@@ -57,7 +69,7 @@ WHERE a."oid" = $1
return ret, nil
}
-func (c *Client) ListFunctions(ctx context.Context, schemaID int64, pager *Pager) ([]*FunctionModel, string, error) {
+func (c *Client) ListFunctions(ctx context.Context, schemaID int64, skipBuiltInFunctions bool, pager *Pager) ([]*FunctionModel, string, error) {
l := ctxzap.Extract(ctx)
l.Debug("listing functions for schema", zap.Int64("schema_id", schemaID))
@@ -68,14 +80,22 @@ func (c *Client) ListFunctions(ctx context.Context, schemaID int64, pager *Pager
var args []interface{}
sb := &strings.Builder{}
_, _ = sb.WriteString(`
-SELECT a."oid"::int, a."proname",
+SELECT a."oid"::int,
+ a."proname",
n."nspname",
- a."proowner"::int, a."proacl"
+ a."proowner"::int, a."proacl",
+ pg_get_function_arguments(a.oid) AS arguments,
+ pg_get_function_result(a.oid) AS return_type
FROM "pg_catalog"."pg_proc" a
LEFT JOIN pg_namespace n ON n."oid" = a."pronamespace"
WHERE a."prokind" = 'f'
AND a."pronamespace" = $1
`)
+
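+ // When requested, exclude built-in functions, i.e. anything living in the pg_catalog or information_schema schemas.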
+ if skipBuiltInFunctions {
+ _, _ = sb.WriteString(` AND n."nspname" NOT IN ('pg_catalog', 'information_schema') `)
+ }
+
args = append(args, schemaID)
_, _ = sb.WriteString("LIMIT $2 ")
args = append(args, limit+1)
@@ -102,3 +122,42 @@ WHERE a."prokind" = 'f'
return ret, nextPageToken, nil
}
+
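+// GrantFunction grants the given privilege on the function to principalName, adding WITH GRANT OPTION when isGrant is true.
+// The schema, principal and privilege are quote-sanitized; the function is addressed by its Signature() (name plus argument list from pg_catalog).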
+func (c *Client) GrantFunction(ctx context.Context, schema string, functionSignature *FunctionModel, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("granting function", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedFunctionSignature := functionSignature.Signature()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ q := fmt.Sprintf("GRANT %s ON FUNCTION %s.%s TO %s", sanitizedPrivilege, sanitizedSchema, sanitizedFunctionSignature, sanitizedPrincipalName)
+
+ if isGrant {
+ q += withGrantOptions
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
+
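+// RevokeFunction revokes the privilege on the function from principalName. When isGrant is true only the grant option is
+// revoked (REVOKE GRANT OPTION FOR), leaving the underlying privilege in place.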
+func (c *Client) RevokeFunction(ctx context.Context, schema string, functionSignature *FunctionModel, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("revoking function", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedFunctionSignature := functionSignature.Signature()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+ var q string
+
+ if isGrant {
+ q = fmt.Sprintf("REVOKE GRANT OPTION FOR %s ON FUNCTION %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedFunctionSignature, sanitizedPrincipalName)
+ } else {
+ q = fmt.Sprintf("REVOKE %s ON FUNCTION %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedFunctionSignature, sanitizedPrincipalName)
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
diff --git a/pkg/postgres/functions_test.go b/pkg/postgres/functions_test.go
new file mode 100644
index 00000000..eefcc48a
--- /dev/null
+++ b/pkg/postgres/functions_test.go
@@ -0,0 +1,42 @@
+package postgres
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+)
+
+func TestFunctionGrantRevoke(t *testing.T) {
+ ctx := context.Background()
+
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ client, err := New(ctx, container.Dsn())
+ require.NoError(t, err)
+
+ functionModel := &FunctionModel{Name: "get_test_item_count", Arguments: ""}
+
+ // Is grant true
+ err = client.GrantFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+
+ err = client.RevokeFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+
+ // is grant false
+ err = client.GrantFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ // revoke without grant
+ err = client.RevokeFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeFunction(ctx, "public", functionModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+}
diff --git a/pkg/postgres/pagination.go b/pkg/postgres/pagination.go
index a14a327a..7d68e4c9 100644
--- a/pkg/postgres/pagination.go
+++ b/pkg/postgres/pagination.go
@@ -5,8 +5,8 @@ import (
)
const (
- MaxPageSize = 100
- MinPageSize = 10
+ MaxPageSize = 500
+ MinPageSize = 100
)
type Pager struct {
diff --git a/pkg/postgres/procedures.go b/pkg/postgres/procedures.go
index 13325691..4edebbd3 100644
--- a/pkg/postgres/procedures.go
+++ b/pkg/postgres/procedures.go
@@ -4,20 +4,24 @@ import (
"context"
"database/sql"
"errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
+
"github.com/georgysavva/scany/pgxscan"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
"go.uber.org/zap"
)
type ProcedureModel struct {
- ID int64 `db:"oid"`
- Name string `db:"proname"`
- Schema string `db:"nspname"`
- OwnerID int64 `db:"proowner"`
- ACLs []string `db:"proacl"`
+ ID int64 `db:"oid"`
+ Name string `db:"proname"`
+ Schema string `db:"nspname"`
+ OwnerID int64 `db:"proowner"`
+ ACLs []string `db:"proacl"`
+ Arguments string `db:"arguments"`
}
func (t *ProcedureModel) GetOwnerID() int64 {
@@ -36,6 +40,10 @@ func (t *ProcedureModel) DefaultPrivileges() PrivilegeSet {
return Execute
}
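+// Signature returns the procedure name together with its argument list so overloaded procedures can be addressed unambiguously.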
+func (t *ProcedureModel) Signature() string {
+ return fmt.Sprintf("%s(%s)", t.Name, t.Arguments)
+}
+
func (c *Client) GetProcedure(ctx context.Context, functionID int64) (*ProcedureModel, error) {
ret := &ProcedureModel{}
@@ -44,7 +52,9 @@ SELECT DISTINCT
a."oid"::int,
a."proname",
n."nspname",
- a."proowner"::int, a."proacl"
+ a."proowner"::int,
+ a."proacl",
+ pg_get_function_arguments(a.oid) as arguments
FROM "pg_catalog"."pg_proc" a
LEFT JOIN pg_namespace n ON n."oid" = a."pronamespace"
WHERE a."oid" = $1
@@ -69,7 +79,13 @@ func (c *Client) ListProcedures(ctx context.Context, schemaID int64, pager *Page
var args []interface{}
sb := &strings.Builder{}
_, _ = sb.WriteString(`
-select a."oid"::int, a."proname", n."nspname", a."proowner"::int, a."proacl"
+select
+ a."oid"::int,
+ a."proname",
+ n."nspname",
+ a."proowner"::int,
+ a."proacl",
+ pg_get_function_arguments(a.oid) as arguments
from "pg_catalog"."pg_proc" a
LEFT JOIN pg_namespace n ON n."oid" = a."pronamespace"
where a."prokind" = 'p'
@@ -101,3 +117,43 @@ where a."prokind" = 'p'
return ret, nextPageToken, nil
}
+
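+// GrantProcedure grants the given privilege on the procedure to principalName, adding WITH GRANT OPTION when isGrant is true.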
+func (c *Client) GrantProcedure(ctx context.Context, schema string, procedure *ProcedureModel, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("granting procedure", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedProcedure := procedure.Signature()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ q := fmt.Sprintf("GRANT %s ON PROCEDURE %s.%s TO %s", sanitizedPrivilege, sanitizedSchema, sanitizedProcedure, sanitizedPrincipalName)
+
+ if isGrant {
+ q += withGrantOptions
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
+
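+// RevokeProcedure revokes the privilege on the procedure from principalName; when isGrant is true only the grant option is revoked.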
+func (c *Client) RevokeProcedure(ctx context.Context, schema string, procedure *ProcedureModel, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("revoking procedure", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedProcedure := procedure.Signature()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ var q string
+
+ if isGrant {
+ q = fmt.Sprintf("REVOKE GRANT OPTION FOR %s ON PROCEDURE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedProcedure, sanitizedPrincipalName)
+ } else {
+ q = fmt.Sprintf("REVOKE %s ON PROCEDURE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedProcedure, sanitizedPrincipalName)
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
diff --git a/pkg/postgres/procedures_test.go b/pkg/postgres/procedures_test.go
new file mode 100644
index 00000000..a1a605a1
--- /dev/null
+++ b/pkg/postgres/procedures_test.go
@@ -0,0 +1,45 @@
+package postgres
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+)
+
+func TestProcedureGrantRevoke(t *testing.T) {
+ ctx := context.Background()
+
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ client, err := New(ctx, container.Dsn())
+ require.NoError(t, err)
+
+ procedureModel := &ProcedureModel{
+ Name: "add_test_item",
+ Arguments: "IN item_name character varying",
+ }
+
+ // Is grant true
+ err = client.GrantProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+
+ err = client.RevokeProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+
+ // is grant false
+ err = client.GrantProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ // revoke without grant
+ err = client.RevokeProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeProcedure(ctx, "public", procedureModel, container.Role(), Execute.Name(), true)
+ require.NoError(t, err)
+}
diff --git a/pkg/postgres/sequences.go b/pkg/postgres/sequences.go
index 9fd270aa..5eafb281 100644
--- a/pkg/postgres/sequences.go
+++ b/pkg/postgres/sequences.go
@@ -4,9 +4,13 @@ import (
"context"
"database/sql"
"errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
+ "go.uber.org/zap"
+
"github.com/georgysavva/scany/pgxscan"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
)
@@ -102,3 +106,43 @@ WHERE n."oid" = $1
return ret, nextPageToken, nil
}
+
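+// GrantSequence grants the given privilege on the sequence to principalName, adding WITH GRANT OPTION when isGrant is true.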
+func (c *Client) GrantSequence(ctx context.Context, schema, sequenceName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("granting sequence", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedSequenceName := pgx.Identifier{sequenceName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ q := fmt.Sprintf("GRANT %s ON %s.%s TO %s", sanitizedPrivilege, sanitizedSchema, sanitizedSequenceName, sanitizedPrincipalName)
+
+ if isGrant {
+ q += withGrantOptions
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
+
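+// RevokeSequence revokes the privilege on the sequence from principalName; when isGrant is true only the grant option is revoked.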
+func (c *Client) RevokeSequence(ctx context.Context, schema, sequenceName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("revoking sequence", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedSequenceName := pgx.Identifier{sequenceName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ var q string
+
+ if isGrant {
+ q = fmt.Sprintf("REVOKE GRANT OPTION FOR %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedSequenceName, sanitizedPrincipalName)
+ } else {
+ q = fmt.Sprintf("REVOKE %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedSequenceName, sanitizedPrincipalName)
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
diff --git a/pkg/postgres/sequences_test.go b/pkg/postgres/sequences_test.go
new file mode 100644
index 00000000..4df83d7e
--- /dev/null
+++ b/pkg/postgres/sequences_test.go
@@ -0,0 +1,40 @@
+package postgres
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+)
+
+func TestSequencesGrantRevoke(t *testing.T) {
+ ctx := context.Background()
+
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ client, err := New(ctx, container.Dsn())
+ require.NoError(t, err)
+
+ // Is grant true
+ err = client.GrantSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ err = client.RevokeSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ // is grant false
+ err = client.GrantSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ // revoke without grant
+ err = client.RevokeSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeSequence(ctx, "public", "test_table_seq", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+}
diff --git a/pkg/postgres/tables.go b/pkg/postgres/tables.go
index 4ebb16c7..d95079cf 100644
--- a/pkg/postgres/tables.go
+++ b/pkg/postgres/tables.go
@@ -4,9 +4,13 @@ import (
"context"
"database/sql"
"errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
+ "go.uber.org/zap"
+
"github.com/georgysavva/scany/pgxscan"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
)
@@ -92,3 +96,43 @@ WHERE n."nspname" = $1
return ret, nextPageToken, nil
}
+
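+// GrantTable grants the given privilege on the table to principalName, adding WITH GRANT OPTION when isGrant is true.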
+func (c *Client) GrantTable(ctx context.Context, schema string, tableName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("granting table", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedTableName := pgx.Identifier{tableName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ q := fmt.Sprintf("GRANT %s ON TABLE %s.%s TO %s", sanitizedPrivilege, sanitizedSchema, sanitizedTableName, sanitizedPrincipalName)
+
+ if isGrant {
+ q += withGrantOptions
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
+
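+// RevokeTable revokes the privilege on the table from principalName; when isGrant is true only the grant option is revoked.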
+func (c *Client) RevokeTable(ctx context.Context, schema string, tableName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("revoking table", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedTableName := pgx.Identifier{tableName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ var q string
+
+ if isGrant {
+ q = fmt.Sprintf("REVOKE GRANT OPTION FOR %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedTableName, sanitizedPrincipalName)
+ } else {
+ q = fmt.Sprintf("REVOKE %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedTableName, sanitizedPrincipalName)
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
diff --git a/pkg/postgres/tables_test.go b/pkg/postgres/tables_test.go
new file mode 100644
index 00000000..4e85bad8
--- /dev/null
+++ b/pkg/postgres/tables_test.go
@@ -0,0 +1,40 @@
+package postgres
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+)
+
+func TestTableGrantRevoke(t *testing.T) {
+ ctx := context.Background()
+
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ client, err := New(ctx, container.Dsn())
+ require.NoError(t, err)
+
+ // Is grant true
+ err = client.GrantTable(ctx, "public", "test_table", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ err = client.RevokeTable(ctx, "public", "test_table", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ // is grant false
+ err = client.GrantTable(ctx, "public", "test_table", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeTable(ctx, "public", "test_table", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ // revoke without grant
+ err = client.RevokeTable(ctx, "public", "test_table", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeTable(ctx, "public", "test_table", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+}
diff --git a/pkg/postgres/views.go b/pkg/postgres/views.go
index 9bb18313..a7e0b2f9 100644
--- a/pkg/postgres/views.go
+++ b/pkg/postgres/views.go
@@ -4,9 +4,13 @@ import (
"context"
"database/sql"
"errors"
+ "fmt"
"strconv"
"strings"
+ "github.com/jackc/pgx/v4"
+ "go.uber.org/zap"
+
"github.com/georgysavva/scany/pgxscan"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
)
@@ -92,3 +96,42 @@ WHERE n."oid" = $1 AND c."relkind" = 'v'
return ret, nextPageToken, nil
}
+
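+// GrantView grants the given privilege on the view to principalName, adding WITH GRANT OPTION when isGrant is true.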
+func (c *Client) GrantView(ctx context.Context, schema, viewName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("granting view", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedViewName := pgx.Identifier{viewName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+
+ q := fmt.Sprintf("GRANT %s ON %s.%s TO %s", sanitizedPrivilege, sanitizedSchema, sanitizedViewName, sanitizedPrincipalName)
+
+ if isGrant {
+ q += " WITH GRANT OPTION"
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
+
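+// RevokeView revokes the privilege on the view from principalName; when isGrant is true only the grant option is revoked.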
+func (c *Client) RevokeView(ctx context.Context, schema, viewName string, principalName string, privilege string, isGrant bool) error {
+ l := ctxzap.Extract(ctx)
+ l.Debug("revoking view", zap.String("principalName", principalName), zap.String("privilege", privilege))
+
+ sanitizedSchema := pgx.Identifier{schema}.Sanitize()
+ sanitizedViewName := pgx.Identifier{viewName}.Sanitize()
+ sanitizedPrincipalName := pgx.Identifier{principalName}.Sanitize()
+ sanitizedPrivilege := sanitizePrivilege(privilege)
+ var q string
+
+ if isGrant {
+ q = fmt.Sprintf("REVOKE GRANT OPTION FOR %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedViewName, sanitizedPrincipalName)
+ } else {
+ q = fmt.Sprintf("REVOKE %s ON TABLE %s.%s FROM %s", sanitizedPrivilege, sanitizedSchema, sanitizedViewName, sanitizedPrincipalName)
+ }
+
+ _, err := c.db.Exec(ctx, q)
+ return err
+}
diff --git a/pkg/postgres/views_test.go b/pkg/postgres/views_test.go
new file mode 100644
index 00000000..6cc0300b
--- /dev/null
+++ b/pkg/postgres/views_test.go
@@ -0,0 +1,40 @@
+package postgres
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/conductorone/baton-postgresql/pkg/testutil"
+)
+
+func TestViewGrantRevoke(t *testing.T) {
+ ctx := context.Background()
+
+ container := testutil.SetupPostgresContainer(ctx, t)
+
+ client, err := New(ctx, container.Dsn())
+ require.NoError(t, err)
+
+ // Is grant true
+ err = client.GrantView(ctx, "public", "test_table_view", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ err = client.RevokeView(ctx, "public", "test_table_view", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+
+ // is grant false
+ err = client.GrantView(ctx, "public", "test_table_view", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeView(ctx, "public", "test_table_view", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ // revoke without grant
+ err = client.RevokeView(ctx, "public", "test_table_view", container.Role(), Select.Name(), false)
+ require.NoError(t, err)
+
+ err = client.RevokeView(ctx, "public", "test_table_view", container.Role(), Select.Name(), true)
+ require.NoError(t, err)
+}
diff --git a/pkg/testutil/container.go b/pkg/testutil/container.go
new file mode 100644
index 00000000..ecf74764
--- /dev/null
+++ b/pkg/testutil/container.go
@@ -0,0 +1,87 @@
+package testutil
+
+import (
+ "context"
+ _ "embed"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/jackc/pgx/v4"
+ "github.com/jackc/pgx/v4/pgxpool"
+
+ "github.com/testcontainers/testcontainers-go"
+ "github.com/testcontainers/testcontainers-go/modules/postgres"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+//go:embed init.sql
+var initScript string
+
+type SQLContainer struct {
+ sqlDB *pgxpool.Pool
+ container *postgres.PostgresContainer
+ dsn string
+}
+
+func (d *SQLContainer) Dsn() string {
+ return d.dsn
+}
+
+func (d *SQLContainer) Db() *pgxpool.Pool {
+ return d.sqlDB
+}
+
+func (d *SQLContainer) Container() *postgres.PostgresContainer {
+ return d.container
+}
+
+func (d *SQLContainer) Role() string {
+ return "test_role"
+}
+
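+// SetupPostgresContainer starts a disposable postgres:15.3-alpine testcontainer, connects a small pgx pool whose debug
+// logging is routed to the test log, and applies the embedded init.sql fixture before returning the container handle.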
+func SetupPostgresContainer(ctx context.Context, t *testing.T) *SQLContainer {
+ ctx, cancel := context.WithTimeout(ctx, time.Minute)
+ defer cancel()
+
+ pgContainer, err := postgres.Run(ctx,
+ "postgres:15.3-alpine",
+ postgres.WithDatabase("postgres"),
+ postgres.WithUsername("postgres"),
+ postgres.WithPassword("postgres"),
+ testcontainers.WithWaitStrategy(
+ wait.ForLog("database system is ready to accept connections").
+ WithOccurrence(2).
+ WithStartupTimeout(5*time.Second),
+ ),
+ )
+
+ require.NoError(t, err)
+
+ connStr, err := pgContainer.ConnectionString(ctx, "sslmode=disable")
+ require.NoError(t, err)
+
+ t.Log("Postgres connection: " + connStr)
+
+ config, err := pgxpool.ParseConfig(connStr)
+ require.NoError(t, err)
+
+ config.ConnConfig.LogLevel = pgx.LogLevelDebug
+ config.ConnConfig.Logger = pgx.LoggerFunc(func(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
+ t.Logf("PGX %s: %s - %v", level.String(), msg, data)
+ })
+ config.MaxConns = 2
+
+ db, err := pgxpool.ConnectConfig(ctx, config)
+ require.NoError(t, err)
+
+ _, err = db.Exec(ctx, initScript)
+ require.NoError(t, err)
+
+ return &SQLContainer{
+ sqlDB: db,
+ container: pgContainer,
+ dsn: connStr,
+ }
+}
diff --git a/pkg/testutil/init.sql b/pkg/testutil/init.sql
new file mode 100644
index 00000000..e23efa36
--- /dev/null
+++ b/pkg/testutil/init.sql
@@ -0,0 +1,74 @@
+CREATE TABLE test_table
+(
+ id SERIAL PRIMARY KEY,
+ name VARCHAR(100) NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE test_table_log
+(
+ log_id SERIAL PRIMARY KEY,
+ test_table_id INTEGER NOT NULL,
+ change_type VARCHAR(10) NOT NULL,
+ changed_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (test_table_id) REFERENCES test_table (id)
+);
+
+-- Insert some sample data
+INSERT INTO test_table (name)
+VALUES ('Test Item 1'),
+ ('Test Item 2'),
+ ('Test Item 3');
+
+-- Create an index for testing
+CREATE INDEX idx_test_table_name ON test_table (name);
+
+-- Create a view for testing
+CREATE VIEW test_table_view AS
+SELECT id, name, created_at
+FROM test_table;
+
+-- Create a function for testing
+CREATE OR REPLACE FUNCTION get_test_item_count()
+ RETURNS INTEGER AS
+$$
+BEGIN
+ RETURN (SELECT COUNT(*) FROM test_table);
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create a trigger function for testing
+CREATE OR REPLACE FUNCTION log_test_table_changes()
+ RETURNS TRIGGER AS
+$$
+BEGIN
+ INSERT INTO test_table_log (test_table_id, change_type, changed_at)
+ VALUES (NEW.id, TG_OP, CURRENT_TIMESTAMP);
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create a sequence for testing
+CREATE SEQUENCE test_table_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+-- Create procedures for testing
+CREATE OR REPLACE PROCEDURE add_test_item(item_name VARCHAR)
+ LANGUAGE plpgsql
+AS
+$$
+BEGIN
+ INSERT INTO test_table (name) VALUES (item_name);
+END;
+$$;
+
+-- create roles and users
+
+CREATE ROLE test_role WITH LOGIN PASSWORD 'test_password';
+CREATE USER test_user WITH PASSWORD 'test_password';
+GRANT test_role TO test_user;
+
diff --git a/vendor/dario.cat/mergo/.deepsource.toml b/vendor/dario.cat/mergo/.deepsource.toml
new file mode 100644
index 00000000..a8bc979e
--- /dev/null
+++ b/vendor/dario.cat/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+ "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+ [analyzers.meta]
+ import_path = "dario.cat/mergo"
\ No newline at end of file
diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore
new file mode 100644
index 00000000..45ad0f1a
--- /dev/null
+++ b/vendor/dario.cat/mergo/.gitignore
@@ -0,0 +1,36 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Golang/Intellij
+.idea
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/dario.cat/mergo/.travis.yml b/vendor/dario.cat/mergo/.travis.yml
new file mode 100644
index 00000000..d324c43b
--- /dev/null
+++ b/vendor/dario.cat/mergo/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+arch:
+ - amd64
+ - ppc64le
+install:
+ - go get -t
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+script:
+ - go test -race -v ./...
+after_script:
+ - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..469b4490
--- /dev/null
+++ b/vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/dario.cat/mergo/CONTRIBUTING.md b/vendor/dario.cat/mergo/CONTRIBUTING.md
new file mode 100644
index 00000000..0a1ff9f9
--- /dev/null
+++ b/vendor/dario.cat/mergo/CONTRIBUTING.md
@@ -0,0 +1,112 @@
+
+# Contributing to mergo
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+- [Reporting Bugs](#reporting-bugs)
+- [Suggesting Enhancements](#suggesting-enhancements)
+
+## Code of Conduct
+
+This project and everyone participating in it is governed by the
+[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to <>.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
+
+We will then take care of the issue as soon as possible.
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
+
+### Reporting Bugs
+
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+- Stack trace (Traceback)
+- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+- Possibly your input and the output
+- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
+
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
+- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
+
+
+## Attribution
+This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
diff --git a/vendor/dario.cat/mergo/LICENSE b/vendor/dario.cat/mergo/LICENSE
new file mode 100644
index 00000000..68668029
--- /dev/null
+++ b/vendor/dario.cat/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md
new file mode 100644
index 00000000..0b3c4888
--- /dev/null
+++ b/vendor/dario.cat/mergo/README.md
@@ -0,0 +1,258 @@
+# Mergo
+
+[![GitHub release][5]][6]
+[![GoCard][7]][8]
+[![Test status][1]][2]
+[![OpenSSF Scorecard][21]][22]
+[![OpenSSF Best Practices][19]][20]
+[![Coverage status][9]][10]
+[![Sourcegraph][11]][12]
+[![FOSSA status][13]][14]
+
+[![GoDoc][3]][4]
+[![Become my sponsor][15]][16]
+[![Tidelift][17]][18]
+
+[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
+[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://img.shields.io/github/release/imdario/mergo.svg
+[6]: https://github.com/imdario/mergo/releases
+[7]: https://goreportcard.com/badge/imdario/mergo
+[8]: https://goreportcard.com/report/github.com/imdario/mergo
+[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[10]: https://coveralls.io/github/imdario/mergo?branch=master
+[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
+[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
+[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
+[15]: https://img.shields.io/github/sponsors/imdario
+[16]: https://github.com/sponsors/imdario
+[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
+[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
+[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
+[20]: https://bestpractices.coreinfrastructure.org/projects/7177
+[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
+[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+Mergo is stable and frozen, ready for production. Check a short list of the projects using it at large scale [here](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases.
+
+### Important notes
+
+#### 1.0.0
+
+In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released.
+
+If the vanity URL is causing issues in your project because a dependency pulls in Mergo (it isn't a direct dependency of your project), it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one released with the old import URL:
+
+```
+replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
+```
+
+#### 0.3.9
+
+Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed the `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
+
+
+### Mergo in the wild
+
+Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including:
+
+* [containerd/containerd](https://github.com/containerd/containerd)
+* [datadog/datadog-agent](https://github.com/datadog/datadog-agent)
+* [docker/cli/](https://github.com/docker/cli/)
+* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+* [go-micro/go-micro](https://github.com/go-micro/go-micro)
+* [grafana/loki](https://github.com/grafana/loki)
+* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+* [masterminds/sprig](https://github.com/Masterminds/sprig)
+* [moby/moby](https://github.com/moby/moby)
+* [slackhq/nebula](https://github.com/slackhq/nebula)
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano)
+
+## Install
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+## Usage
+
+You can only merge structs of the same type, or maps of the same types: Mergo fills in exported fields that are still set to the zero value of their type. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+}
+```
+
+You can also merge while overwriting values by using the option `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+}
+```
+
+If you need to override pointers, so that the source pointer itself (rather than the value it points to) is assigned to the destination pointer, you must use `WithoutDereference`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A *string
+ B int64
+}
+
+func main() {
+ first := "first"
+ second := "second"
+ src := Foo{
+ A: &first,
+ B: 2,
+ }
+
+ dest := Foo{
+ A: &second,
+ B: 1,
+ }
+
+	mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference)
+	fmt.Println(*dest.A, dest.B)
+	// Will print
+	// first 2
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+}
+```
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will simply be assigned as values (see the sketch below).
+
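+As a quick illustrative sketch (the `Outer` and `Inner` types here are made up for the example), note how a nested struct ends up stored as a plain value under a lower-cased key rather than being flattened into a nested map:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"dario.cat/mergo"
+)
+
+type Inner struct {
+	Value string
+}
+
+type Outer struct {
+	Name  string
+	Inner Inner
+}
+
+func main() {
+	src := Outer{Name: "example", Inner: Inner{Value: "nested"}}
+	dst := map[string]interface{}{}
+
+	if err := mergo.Map(&dst, src); err != nil {
+		// handle the error
+	}
+
+	// dst["inner"] holds an Inner struct value, not a map[string]interface{}.
+	fmt.Printf("%#v\n", dst["inner"])
+}
+```
+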
+Here is a nice example:
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+)
+
+type Foo struct {
+ A string
+ B int64
+}
+
+func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+ go get gopkg.in/yaml.v3
+
+### Transformers
+
+Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; Mergo never considers it an empty value, but its `IsZero` method can return true because all of its fields hold zero values. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+}
+
+type Snapshot struct {
+ Time time.Time
+ // ...
+}
+
+func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md
new file mode 100644
index 00000000..a5de61f7
--- /dev/null
+++ b/vendor/dario.cat/mergo/SECURITY.md
@@ -0,0 +1,14 @@
+# Security Policy
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.3.x | :white_check_mark: |
+| < 0.3 | :x: |
+
+## Security contact information
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure.
diff --git a/vendor/dario.cat/mergo/doc.go b/vendor/dario.cat/mergo/doc.go
new file mode 100644
index 00000000..7d96ec05
--- /dev/null
+++ b/vendor/dario.cat/mergo/doc.go
@@ -0,0 +1,148 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+# Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMware, Shopify, etc.
+
+# Important notes
+
+1.0.0
+
+In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
+
+0.3.9
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10, and we consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+# Install
+
+Do your usual installation procedure:
+
+ go get dario.cat/mergo
+
+ // use in your .go code
+ import (
+ "dario.cat/mergo"
+ )
+
+# Usage
+
+You can only merge structs of the same type, or maps of the same types: Mergo fills in exported fields that are still set to the zero value of their type. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It won't merge empty struct values, as they are zero values too. Also, maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+ if err := mergo.Merge(&dst, src); err != nil {
+ // ...
+ }
+
+You can also merge while overwriting values by using the option WithOverride.
+
+ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+ // ...
+ }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+ if err := mergo.Map(&dst, srcMap); err != nil {
+ // ...
+ }
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will simply be assigned as values.
+
+Here is a nice example:
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ )
+
+ type Foo struct {
+ A string
+ B int64
+ }
+
+ func main() {
+ src := Foo{
+ A: "one",
+ B: 2,
+ }
+ dest := Foo{
+ A: "two",
+ }
+ mergo.Merge(&dest, src)
+ fmt.Println(dest)
+ // Will print
+ // {two 2}
+ }
+
+# Transformers
+
+Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; Mergo never considers it an empty value, but its IsZero method can return true because all of its fields hold zero values. How can we merge a non-zero time.Time?
+
+ package main
+
+ import (
+ "fmt"
+ "dario.cat/mergo"
+ "reflect"
+ "time"
+ )
+
+ type timeTransformer struct {
+ }
+
+ func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+ if typ == reflect.TypeOf(time.Time{}) {
+ return func(dst, src reflect.Value) error {
+ if dst.CanSet() {
+ isZero := dst.MethodByName("IsZero")
+ result := isZero.Call([]reflect.Value{})
+ if result[0].Bool() {
+ dst.Set(src)
+ }
+ }
+ return nil
+ }
+ }
+ return nil
+ }
+
+ type Snapshot struct {
+ Time time.Time
+ // ...
+ }
+
+ func main() {
+ src := Snapshot{time.Now()}
+ dest := Snapshot{}
+ mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+ fmt.Println(dest)
+ // Will print
+ // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+ }
+
+# Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+# About
+
+Written by Dario Castañé: https://da.rio.hn
+
+# License
+
+BSD 3-Clause license, as Go language.
+*/
+package mergo
diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go
new file mode 100644
index 00000000..759b4f74
--- /dev/null
+++ b/vendor/dario.cat/mergo/map.go
@@ -0,0 +1,178 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+ if s == "" {
+ return s
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+ r, _ := utf8.DecodeRuneInString(field.Name)
+ return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{typ, seen, addr}
+ }
+ zeroValue := reflect.Value{}
+ switch dst.Kind() {
+ case reflect.Map:
+ dstMap := dst.Interface().(map[string]interface{})
+ for i, n := 0, src.NumField(); i < n; i++ {
+ srcType := src.Type()
+ field := srcType.Field(i)
+ if !isExported(field) {
+ continue
+ }
+ fieldName := field.Name
+ fieldName = changeInitialCase(fieldName, unicode.ToLower)
+ if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue {
+ dstMap[fieldName] = src.Field(i).Interface()
+ }
+ }
+ case reflect.Ptr:
+ if dst.IsNil() {
+ v := reflect.New(dst.Type().Elem())
+ dst.Set(v)
+ }
+ dst = dst.Elem()
+ fallthrough
+ case reflect.Struct:
+ srcMap := src.Interface().(map[string]interface{})
+ for key := range srcMap {
+ config.overwriteWithEmptyValue = true
+ srcValue := srcMap[key]
+ fieldName := changeInitialCase(key, unicode.ToUpper)
+ dstElement := dst.FieldByName(fieldName)
+ if dstElement == zeroValue {
+ // We discard it because the field doesn't exist.
+ continue
+ }
+ srcElement := reflect.ValueOf(srcValue)
+ dstKind := dstElement.Kind()
+ srcKind := srcElement.Kind()
+ if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+ srcElement = srcElement.Elem()
+ srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+ } else if dstKind == reflect.Ptr {
+ // Can this work? I guess it can't.
+ if srcKind != reflect.Ptr && srcElement.CanAddr() {
+ srcPtr := srcElement.Addr()
+ srcElement = reflect.ValueOf(srcPtr)
+ srcKind = reflect.Ptr
+ }
+ }
+
+ if !srcElement.IsValid() {
+ continue
+ }
+ if srcKind == dstKind {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else if srcKind == reflect.Map {
+ if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+ }
+ }
+ }
+ return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will do recursively
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Keys in src that don't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ // To be friction-less, we redirect equal-type arguments
+ // to deepMerge. Only because arguments can be anything.
+ if vSrc.Kind() == vDst.Kind() {
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+ }
+ switch vSrc.Kind() {
+ case reflect.Struct:
+ if vDst.Kind() != reflect.Map {
+ return ErrExpectedMapAsDestination
+ }
+ case reflect.Map:
+ if vDst.Kind() != reflect.Struct {
+ return ErrExpectedStructAsDestination
+ }
+ default:
+ return ErrNotSupported
+ }
+ return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go
new file mode 100644
index 00000000..fd47c95b
--- /dev/null
+++ b/vendor/dario.cat/mergo/merge.go
@@ -0,0 +1,409 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "fmt"
+ "reflect"
+)
+
+func hasMergeableFields(dst reflect.Value) (exported bool) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ field := dst.Type().Field(i)
+ if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+ exported = exported || hasMergeableFields(dst.Field(i))
+ } else if isExportedComponent(&field) {
+ exported = exported || len(field.PkgPath) == 0
+ }
+ }
+ return
+}
+
+func isExportedComponent(field *reflect.StructField) bool {
+ pkgPath := field.PkgPath
+ if len(pkgPath) > 0 {
+ return false
+ }
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ return false
+ }
+ return true
+}
+
+type Config struct {
+ Transformers Transformers
+ Overwrite bool
+ ShouldNotDereference bool
+ AppendSlice bool
+ TypeCheck bool
+ overwriteWithEmptyValue bool
+ overwriteSliceWithEmptyValue bool
+ sliceDeepCopy bool
+ debug bool
+}
+
+type Transformers interface {
+ Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+ overwrite := config.Overwrite
+ typeCheck := config.TypeCheck
+ overwriteWithEmptySrc := config.overwriteWithEmptyValue
+ overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
+ sliceDeepCopy := config.sliceDeepCopy
+
+ if !src.IsValid() {
+ return
+ }
+ if dst.CanAddr() {
+ addr := dst.UnsafeAddr()
+ h := 17 * addr
+ seen := visited[h]
+ typ := dst.Type()
+ for p := seen; p != nil; p = p.next {
+ if p.ptr == addr && p.typ == typ {
+ return nil
+ }
+ }
+ // Remember, remember...
+ visited[h] = &visit{typ, seen, addr}
+ }
+
+ if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
+ if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+ err = fn(dst, src)
+ return
+ }
+ }
+
+ switch dst.Kind() {
+ case reflect.Struct:
+ if hasMergeableFields(dst) {
+ for i, n := 0, dst.NumField(); i < n; i++ {
+ if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ } else {
+ if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
+ dst.Set(src)
+ }
+ }
+ case reflect.Map:
+ if dst.IsNil() && !src.IsNil() {
+ if dst.CanSet() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ } else {
+ dst = src
+ return
+ }
+ }
+
+ if src.Kind() != reflect.Map {
+ if overwrite && dst.CanSet() {
+ dst.Set(src)
+ }
+ return
+ }
+
+ for _, key := range src.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ continue
+ }
+ dstElement := dst.MapIndex(key)
+ switch srcElement.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+ if srcElement.IsNil() {
+ if overwrite {
+ dst.SetMapIndex(key, srcElement)
+ }
+ continue
+ }
+ fallthrough
+ default:
+ if !srcElement.CanInterface() {
+ continue
+ }
+ switch reflect.TypeOf(srcElement.Interface()).Kind() {
+ case reflect.Struct:
+ fallthrough
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ srcMapElm := srcElement
+ dstMapElm := dstElement
+ if srcMapElm.CanInterface() {
+ srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+ if dstMapElm.IsValid() {
+ dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+ }
+ }
+ if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+ return
+ }
+ case reflect.Slice:
+ srcSlice := reflect.ValueOf(srcElement.Interface())
+
+ var dstSlice reflect.Value
+ if !dstElement.IsValid() || dstElement.IsNil() {
+ dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+ } else {
+ dstSlice = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ if typeCheck && srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = srcSlice
+ } else if config.AppendSlice {
+ if srcSlice.Type() != dstSlice.Type() {
+ return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+ }
+ dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+ } else if sliceDeepCopy {
+ i := 0
+ for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
+ srcElement := srcSlice.Index(i)
+ dstElement := dstSlice.Index(i)
+
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+
+ }
+ dst.SetMapIndex(key, dstSlice)
+ }
+ }
+
+ if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
+ continue
+ }
+ if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
+ continue
+ }
+ }
+
+ if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ dst.SetMapIndex(key, srcElement)
+ }
+ }
+
+ // Ensure that all keys in dst are deleted if they are not in src.
+ if overwriteWithEmptySrc {
+ for _, key := range dst.MapKeys() {
+ srcElement := src.MapIndex(key)
+ if !srcElement.IsValid() {
+ dst.SetMapIndex(key, reflect.Value{})
+ }
+ }
+ }
+ case reflect.Slice:
+ if !dst.CanSet() {
+ break
+ }
+ if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
+ dst.Set(src)
+ } else if config.AppendSlice {
+ if src.Type() != dst.Type() {
+ return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+ }
+ dst.Set(reflect.AppendSlice(dst, src))
+ } else if sliceDeepCopy {
+ for i := 0; i < src.Len() && i < dst.Len(); i++ {
+ srcElement := src.Index(i)
+ dstElement := dst.Index(i)
+ if srcElement.CanInterface() {
+ srcElement = reflect.ValueOf(srcElement.Interface())
+ }
+ if dstElement.CanInterface() {
+ dstElement = reflect.ValueOf(dstElement.Interface())
+ }
+
+ if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+ return
+ }
+ }
+ }
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Interface:
+ if isReflectNil(src) {
+ if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if src.Kind() != reflect.Interface {
+ if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ } else if src.Kind() == reflect.Ptr {
+ if !config.ShouldNotDereference {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ } else if src.Elem().Kind() != reflect.Struct {
+ if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
+ dst.Set(src)
+ }
+ }
+ } else if dst.Elem().Type() == src.Type() {
+ if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+ return
+ }
+ } else {
+ return ErrDifferentArgumentsTypes
+ }
+ break
+ }
+
+ if dst.IsNil() || overwrite {
+ if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
+ dst.Set(src)
+ }
+ break
+ }
+
+ if dst.Elem().Kind() == src.Elem().Kind() {
+ if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+ return
+ }
+ break
+ }
+ default:
+ mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
+ if mustSet {
+ if dst.CanSet() {
+ dst.Set(src)
+ } else {
+ dst = src
+ }
+ }
+ }
+
+ return
+}
+
+// Merge will fill any empty for value type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to struct.
+// It won't merge unexported (private) fields and will do recursively any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+ return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+ return func(config *Config) {
+ config.Transformers = transformers
+ }
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
+func WithOverride(config *Config) {
+ config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
+func WithOverwriteWithEmptyValue(config *Config) {
+ config.Overwrite = true
+ config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+ config.overwriteSliceWithEmptyValue = true
+}
+
+// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
+// (i.e. a non-nil pointer is never considered empty).
+func WithoutDereference(config *Config) {
+ config.ShouldNotDereference = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+ config.AppendSlice = true
+}
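+
+// Illustrative sketch (not from the upstream documentation): given a struct
+// with a []string field, merging with WithAppendSlice concatenates the source
+// slice onto the destination instead of replacing it:
+//
+//	type cfg struct{ Hosts []string }
+//	dst := cfg{Hosts: []string{"a"}}
+//	src := cfg{Hosts: []string{"b"}}
+//	_ = Merge(&dst, src, WithAppendSlice) // dst.Hosts is now []string{"a", "b"}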
+
+// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+ config.TypeCheck = true
+}
+
+// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
+func WithSliceDeepCopy(config *Config) {
+ config.sliceDeepCopy = true
+ config.Overwrite = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+ if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
+ return ErrNonPointerArgument
+ }
+ var (
+ vDst, vSrc reflect.Value
+ err error
+ )
+
+ config := &Config{}
+
+ for _, opt := range opts {
+ opt(config)
+ }
+
+ if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+ return err
+ }
+ if vDst.Type() != vSrc.Type() {
+ return ErrDifferentArgumentsTypes
+ }
+ return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
+
+// isReflectNil reports whether the provided reflect value is nil.
+func isReflectNil(v reflect.Value) bool {
+ k := v.Kind()
+ switch k {
+ case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+ // Both interface and slice are nil if first word is 0.
+ // Both are always bigger than a word; assume flagIndir.
+ return v.IsNil()
+ default:
+ return false
+ }
+}
diff --git a/vendor/dario.cat/mergo/mergo.go b/vendor/dario.cat/mergo/mergo.go
new file mode 100644
index 00000000..0a721e2d
--- /dev/null
+++ b/vendor/dario.cat/mergo/mergo.go
@@ -0,0 +1,81 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+ "errors"
+ "reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+ ErrNilArguments = errors.New("src and dst must not be nil")
+ ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type")
+ ErrNotSupported = errors.New("only structs, maps, and slices are supported")
+ ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
+ ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+ ErrNonPointerArgument = errors.New("dst must be a pointer")
+)
+
+// During deepMerge, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited are stored in a map indexed by 17 * a1 + a2;
+type visit struct {
+ typ reflect.Type
+ next *visit
+ ptr uintptr
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return true
+ }
+ if shouldDereference {
+ return isEmptyValue(v.Elem(), shouldDereference)
+ }
+ return false
+ case reflect.Func:
+ return v.IsNil()
+ case reflect.Invalid:
+ return true
+ }
+ return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+ if dst == nil || src == nil {
+ err = ErrNilArguments
+ return
+ }
+ vDst = reflect.ValueOf(dst).Elem()
+ if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
+ err = ErrNotSupported
+ return
+ }
+ vSrc = reflect.ValueOf(src)
+ // We check if vSrc is a pointer to dereference it.
+ if vSrc.Kind() == reflect.Ptr {
+ vSrc = vSrc.Elem()
+ }
+ return
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 00000000..e3d9a64d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 00000000..261c041e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform dependent.
+
+For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go), which validates that the expected events are being produced and called; the other is a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
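+The sketch below is a rough, minimal illustration of how the pieces fit together; the `demoHandler` type, its no-op stubs, and the input bytes are made up for the example. A handler implementing `AnsiEventHandler` is passed to `CreateParser`, and `Parse` turns the byte stream into calls on that handler:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+)
+
+// demoHandler implements AnsiEventHandler; only Print and CUU do anything
+// interesting here, the rest are no-op stubs.
+type demoHandler struct{}
+
+func (demoHandler) Print(b byte) error     { fmt.Printf("Print(%q)\n", b); return nil }
+func (demoHandler) Execute(byte) error     { return nil }
+func (demoHandler) CUU(n int) error        { fmt.Printf("CUU(%d)\n", n); return nil }
+func (demoHandler) CUD(int) error          { return nil }
+func (demoHandler) CUF(int) error          { return nil }
+func (demoHandler) CUB(int) error          { return nil }
+func (demoHandler) CNL(int) error          { return nil }
+func (demoHandler) CPL(int) error          { return nil }
+func (demoHandler) CHA(int) error          { return nil }
+func (demoHandler) VPA(int) error          { return nil }
+func (demoHandler) CUP(int, int) error     { return nil }
+func (demoHandler) HVP(int, int) error     { return nil }
+func (demoHandler) DECTCEM(bool) error     { return nil }
+func (demoHandler) DECOM(bool) error       { return nil }
+func (demoHandler) DECCOLM(bool) error     { return nil }
+func (demoHandler) ED(int) error           { return nil }
+func (demoHandler) EL(int) error           { return nil }
+func (demoHandler) IL(int) error           { return nil }
+func (demoHandler) DL(int) error           { return nil }
+func (demoHandler) ICH(int) error          { return nil }
+func (demoHandler) DCH(int) error          { return nil }
+func (demoHandler) SGR([]int) error        { return nil }
+func (demoHandler) SU(int) error           { return nil }
+func (demoHandler) SD(int) error           { return nil }
+func (demoHandler) DA([]string) error      { return nil }
+func (demoHandler) DECSTBM(int, int) error { return nil }
+func (demoHandler) IND() error             { return nil }
+func (demoHandler) RI() error              { return nil }
+func (demoHandler) Flush() error           { return nil }
+
+func main() {
+	parser := ansiterm.CreateParser("Ground", demoHandler{})
+
+	// "hi" is printable text; ESC [ 2 A is the CSI sequence for Cursor Up by 2.
+	if _, err := parser.Parse([]byte("hi\x1b[2A")); err != nil {
+		fmt.Println("parse error:", err)
+	}
+	// Calls Print('h'), Print('i'), CUU(2), and finally Flush().
+}
+```
+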
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 00000000..96504a33
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+ // ECMA-48 Set Graphics Rendition
+ // Note:
+ // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+ // -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+ // -- Windows does not expose the per-window cursor (i.e., caret) blink times
+ ANSI_SGR_RESET = 0
+ ANSI_SGR_BOLD = 1
+ ANSI_SGR_DIM = 2
+ _ANSI_SGR_ITALIC = 3
+ ANSI_SGR_UNDERLINE = 4
+ _ANSI_SGR_BLINKSLOW = 5
+ _ANSI_SGR_BLINKFAST = 6
+ ANSI_SGR_REVERSE = 7
+ _ANSI_SGR_INVISIBLE = 8
+ _ANSI_SGR_LINETHROUGH = 9
+ _ANSI_SGR_FONT_00 = 10
+ _ANSI_SGR_FONT_01 = 11
+ _ANSI_SGR_FONT_02 = 12
+ _ANSI_SGR_FONT_03 = 13
+ _ANSI_SGR_FONT_04 = 14
+ _ANSI_SGR_FONT_05 = 15
+ _ANSI_SGR_FONT_06 = 16
+ _ANSI_SGR_FONT_07 = 17
+ _ANSI_SGR_FONT_08 = 18
+ _ANSI_SGR_FONT_09 = 19
+ _ANSI_SGR_FONT_10 = 20
+ _ANSI_SGR_DOUBLEUNDERLINE = 21
+ ANSI_SGR_BOLD_DIM_OFF = 22
+ _ANSI_SGR_ITALIC_OFF = 23
+ ANSI_SGR_UNDERLINE_OFF = 24
+ _ANSI_SGR_BLINK_OFF = 25
+ _ANSI_SGR_RESERVED_00 = 26
+ ANSI_SGR_REVERSE_OFF = 27
+ _ANSI_SGR_INVISIBLE_OFF = 28
+ _ANSI_SGR_LINETHROUGH_OFF = 29
+ ANSI_SGR_FOREGROUND_BLACK = 30
+ ANSI_SGR_FOREGROUND_RED = 31
+ ANSI_SGR_FOREGROUND_GREEN = 32
+ ANSI_SGR_FOREGROUND_YELLOW = 33
+ ANSI_SGR_FOREGROUND_BLUE = 34
+ ANSI_SGR_FOREGROUND_MAGENTA = 35
+ ANSI_SGR_FOREGROUND_CYAN = 36
+ ANSI_SGR_FOREGROUND_WHITE = 37
+ _ANSI_SGR_RESERVED_01 = 38
+ ANSI_SGR_FOREGROUND_DEFAULT = 39
+ ANSI_SGR_BACKGROUND_BLACK = 40
+ ANSI_SGR_BACKGROUND_RED = 41
+ ANSI_SGR_BACKGROUND_GREEN = 42
+ ANSI_SGR_BACKGROUND_YELLOW = 43
+ ANSI_SGR_BACKGROUND_BLUE = 44
+ ANSI_SGR_BACKGROUND_MAGENTA = 45
+ ANSI_SGR_BACKGROUND_CYAN = 46
+ ANSI_SGR_BACKGROUND_WHITE = 47
+ _ANSI_SGR_RESERVED_02 = 48
+ ANSI_SGR_BACKGROUND_DEFAULT = 49
+ // 50 - 65: Unsupported
+
+ ANSI_MAX_CMD_LENGTH = 4096
+
+ MAX_INPUT_EVENTS = 128
+ DEFAULT_WIDTH = 80
+ DEFAULT_HEIGHT = 24
+
+ ANSI_BEL = 0x07
+ ANSI_BACKSPACE = 0x08
+ ANSI_TAB = 0x09
+ ANSI_LINE_FEED = 0x0A
+ ANSI_VERTICAL_TAB = 0x0B
+ ANSI_FORM_FEED = 0x0C
+ ANSI_CARRIAGE_RETURN = 0x0D
+ ANSI_ESCAPE_PRIMARY = 0x1B
+ ANSI_ESCAPE_SECONDARY = 0x5B
+ ANSI_OSC_STRING_ENTRY = 0x5D
+ ANSI_COMMAND_FIRST = 0x40
+ ANSI_COMMAND_LAST = 0x7E
+ DCS_ENTRY = 0x90
+ CSI_ENTRY = 0x9B
+ OSC_STRING = 0x9D
+ ANSI_PARAMETER_SEP = ";"
+ ANSI_CMD_G0 = '('
+ ANSI_CMD_G1 = ')'
+ ANSI_CMD_G2 = '*'
+ ANSI_CMD_G3 = '+'
+ ANSI_CMD_DECPNM = '>'
+ ANSI_CMD_DECPAM = '='
+ ANSI_CMD_OSC = ']'
+ ANSI_CMD_STR_TERM = '\\'
+
+ KEY_CONTROL_PARAM_2 = ";2"
+ KEY_CONTROL_PARAM_3 = ";3"
+ KEY_CONTROL_PARAM_4 = ";4"
+ KEY_CONTROL_PARAM_5 = ";5"
+ KEY_CONTROL_PARAM_6 = ";6"
+ KEY_CONTROL_PARAM_7 = ";7"
+ KEY_CONTROL_PARAM_8 = ";8"
+ KEY_ESC_CSI = "\x1B["
+ KEY_ESC_N = "\x1BN"
+ KEY_ESC_O = "\x1BO"
+
+ FILL_CHARACTER = ' '
+)
+
+func getByteRange(start byte, end byte) []byte {
+ bytes := make([]byte, 0, 32)
+ for i := start; i <= end; i++ {
+ bytes = append(bytes, byte(i))
+ }
+
+ return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE 20+A0 hex Always and everywhere a blank space
+// Intermediate 20-2F hex !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters 30-3F hex 0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics 40-7E hex (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+ escapeToGroundBytes := getByteRange(0x30, 0x4F)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+ escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+ escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+ return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+ executeBytes := getByteRange(0x00, 0x17)
+ executeBytes = append(executeBytes, 0x19)
+ executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+ return executeBytes
+}
+
+func getToGroundBytes() []byte {
+ groundBytes := []byte{0x18}
+ groundBytes = append(groundBytes, 0x1A)
+ groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+ groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+ groundBytes = append(groundBytes, 0x99)
+ groundBytes = append(groundBytes, 0x9A)
+ groundBytes = append(groundBytes, 0x9C)
+ return groundBytes
+}
+
+// Delete 7F hex Always and everywhere ignored
+// C1 Control 80-9F hex 32 additional control characters
+// G1 Displayable A1-FE hex 94 additional displayable characters
+// Special A0+FF hex Same as SPACE and DELETE
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
new file mode 100644
index 00000000..8d66e777
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/context.go
@@ -0,0 +1,7 @@
+package ansiterm
+
+type ansiContext struct {
+ currentChar byte
+ paramBuffer []byte
+ interBuffer []byte
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
new file mode 100644
index 00000000..bcbe00d0
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -0,0 +1,49 @@
+package ansiterm
+
+type csiEntryState struct {
+ baseState
+}
+
+func (csiState csiEntryState) Handle(b byte) (s state, e error) {
+ csiState.parser.logf("CsiEntry::Handle %#x", b)
+
+ nextState, err := csiState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(alphabetics, b):
+ return csiState.parser.ground, nil
+ case sliceContains(csiCollectables, b):
+ return csiState.parser.csiParam, nil
+ case sliceContains(executors, b):
+ return csiState, csiState.parser.execute()
+ }
+
+ return csiState, nil
+}
+
+func (csiState csiEntryState) Transition(s state) error {
+ csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+ csiState.baseState.Transition(s)
+
+ switch s {
+ case csiState.parser.ground:
+ return csiState.parser.csiDispatch()
+ case csiState.parser.csiParam:
+ switch {
+ case sliceContains(csiParams, csiState.parser.context.currentChar):
+ csiState.parser.collectParam()
+ case sliceContains(intermeds, csiState.parser.context.currentChar):
+ csiState.parser.collectInter()
+ }
+ }
+
+ return nil
+}
+
+func (csiState csiEntryState) Enter() error {
+ csiState.parser.clear()
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
new file mode 100644
index 00000000..7ed5e01c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -0,0 +1,38 @@
+package ansiterm
+
+type csiParamState struct {
+ baseState
+}
+
+func (csiState csiParamState) Handle(b byte) (s state, e error) {
+ csiState.parser.logf("CsiParam::Handle %#x", b)
+
+ nextState, err := csiState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(alphabetics, b):
+ return csiState.parser.ground, nil
+ case sliceContains(csiCollectables, b):
+ csiState.parser.collectParam()
+ return csiState, nil
+ case sliceContains(executors, b):
+ return csiState, csiState.parser.execute()
+ }
+
+ return csiState, nil
+}
+
+func (csiState csiParamState) Transition(s state) error {
+ csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+ csiState.baseState.Transition(s)
+
+ switch s {
+ case csiState.parser.ground:
+ return csiState.parser.csiDispatch()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
new file mode 100644
index 00000000..1c719db9
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -0,0 +1,36 @@
+package ansiterm
+
+type escapeIntermediateState struct {
+ baseState
+}
+
+func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
+ escState.parser.logf("escapeIntermediateState::Handle %#x", b)
+ nextState, err := escState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(intermeds, b):
+ return escState, escState.parser.collectInter()
+ case sliceContains(executors, b):
+ return escState, escState.parser.execute()
+ case sliceContains(escapeIntermediateToGroundBytes, b):
+ return escState.parser.ground, nil
+ }
+
+ return escState, nil
+}
+
+func (escState escapeIntermediateState) Transition(s state) error {
+ escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+ escState.baseState.Transition(s)
+
+ switch s {
+ case escState.parser.ground:
+ return escState.parser.escDispatch()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
new file mode 100644
index 00000000..6390abd2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -0,0 +1,47 @@
+package ansiterm
+
+type escapeState struct {
+ baseState
+}
+
+func (escState escapeState) Handle(b byte) (s state, e error) {
+ escState.parser.logf("escapeState::Handle %#x", b)
+ nextState, err := escState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case b == ANSI_ESCAPE_SECONDARY:
+ return escState.parser.csiEntry, nil
+ case b == ANSI_OSC_STRING_ENTRY:
+ return escState.parser.oscString, nil
+ case sliceContains(executors, b):
+ return escState, escState.parser.execute()
+ case sliceContains(escapeToGroundBytes, b):
+ return escState.parser.ground, nil
+ case sliceContains(intermeds, b):
+ return escState.parser.escapeIntermediate, nil
+ }
+
+ return escState, nil
+}
+
+func (escState escapeState) Transition(s state) error {
+ escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
+ escState.baseState.Transition(s)
+
+ switch s {
+ case escState.parser.ground:
+ return escState.parser.escDispatch()
+ case escState.parser.escapeIntermediate:
+ return escState.parser.collectInter()
+ }
+
+ return nil
+}
+
+func (escState escapeState) Enter() error {
+ escState.parser.clear()
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
new file mode 100644
index 00000000..98087b38
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go
@@ -0,0 +1,90 @@
+package ansiterm
+
+type AnsiEventHandler interface {
+ // Print
+ Print(b byte) error
+
+ // Execute C0 commands
+ Execute(b byte) error
+
+ // CUrsor Up
+ CUU(int) error
+
+ // CUrsor Down
+ CUD(int) error
+
+ // CUrsor Forward
+ CUF(int) error
+
+ // CUrsor Backward
+ CUB(int) error
+
+ // Cursor to Next Line
+ CNL(int) error
+
+ // Cursor to Previous Line
+ CPL(int) error
+
+ // Cursor Horizontal position Absolute
+ CHA(int) error
+
+ // Vertical line Position Absolute
+ VPA(int) error
+
+ // CUrsor Position
+ CUP(int, int) error
+
+ // Horizontal and Vertical Position (depends on PUM)
+ HVP(int, int) error
+
+ // Text Cursor Enable Mode
+ DECTCEM(bool) error
+
+ // Origin Mode
+ DECOM(bool) error
+
+ // 132 Column Mode
+ DECCOLM(bool) error
+
+ // Erase in Display
+ ED(int) error
+
+ // Erase in Line
+ EL(int) error
+
+ // Insert Line
+ IL(int) error
+
+ // Delete Line
+ DL(int) error
+
+ // Insert Character
+ ICH(int) error
+
+ // Delete Character
+ DCH(int) error
+
+ // Set Graphics Rendition
+ SGR([]int) error
+
+ // Pan Down
+ SU(int) error
+
+ // Pan Up
+ SD(int) error
+
+ // Device Attributes
+ DA([]string) error
+
+ // Set Top and Bottom Margins
+ DECSTBM(int, int) error
+
+ // Index
+ IND() error
+
+ // Reverse Index
+ RI() error
+
+ // Flush updates from previous commands
+ Flush() error
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
new file mode 100644
index 00000000..52451e94
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go
@@ -0,0 +1,24 @@
+package ansiterm
+
+type groundState struct {
+ baseState
+}
+
+func (gs groundState) Handle(b byte) (s state, e error) {
+ gs.parser.context.currentChar = b
+
+ nextState, err := gs.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case sliceContains(printables, b):
+ return gs, gs.parser.print()
+
+ case sliceContains(executors, b):
+ return gs, gs.parser.execute()
+ }
+
+ return gs, nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
new file mode 100644
index 00000000..593b10ab
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -0,0 +1,31 @@
+package ansiterm
+
+type oscStringState struct {
+ baseState
+}
+
+func (oscState oscStringState) Handle(b byte) (s state, e error) {
+ oscState.parser.logf("OscString::Handle %#x", b)
+ nextState, err := oscState.baseState.Handle(b)
+ if nextState != nil || err != nil {
+ return nextState, err
+ }
+
+ switch {
+ case isOscStringTerminator(b):
+ return oscState.parser.ground, nil
+ }
+
+ return oscState, nil
+}
+
+// See below for OSC string terminators for linux
+// http://man7.org/linux/man-pages/man4/console_codes.4.html
+func isOscStringTerminator(b byte) bool {
+
+ if b == ANSI_BEL || b == 0x5C {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
new file mode 100644
index 00000000..03cec7ad
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -0,0 +1,151 @@
+package ansiterm
+
+import (
+ "errors"
+ "log"
+ "os"
+)
+
+type AnsiParser struct {
+ currState state
+ eventHandler AnsiEventHandler
+ context *ansiContext
+ csiEntry state
+ csiParam state
+ dcsEntry state
+ escape state
+ escapeIntermediate state
+ error state
+ ground state
+ oscString state
+ stateMap []state
+
+ logf func(string, ...interface{})
+}
+
+type Option func(*AnsiParser)
+
+func WithLogf(f func(string, ...interface{})) Option {
+ return func(ap *AnsiParser) {
+ ap.logf = f
+ }
+}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
+ ap := &AnsiParser{
+ eventHandler: evtHandler,
+ context: &ansiContext{},
+ }
+ for _, o := range opts {
+ o(ap)
+ }
+
+ if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+ logFile, _ := os.Create("ansiParser.log")
+ logger := log.New(logFile, "", log.LstdFlags)
+ if ap.logf != nil {
+ l := ap.logf
+ ap.logf = func(s string, v ...interface{}) {
+ l(s, v...)
+ logger.Printf(s, v...)
+ }
+ } else {
+ ap.logf = logger.Printf
+ }
+ }
+
+ if ap.logf == nil {
+ ap.logf = func(string, ...interface{}) {}
+ }
+
+ ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+ ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+ ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+ ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+ ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+ ap.error = errorState{baseState{name: "Error", parser: ap}}
+ ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+ ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+ ap.stateMap = []state{
+ ap.csiEntry,
+ ap.csiParam,
+ ap.dcsEntry,
+ ap.escape,
+ ap.escapeIntermediate,
+ ap.error,
+ ap.ground,
+ ap.oscString,
+ }
+
+ ap.currState = getState(initialState, ap.stateMap)
+
+ ap.logf("CreateParser: parser %p", ap)
+ return ap
+}
+
+func getState(name string, states []state) state {
+ for _, el := range states {
+ if el.Name() == name {
+ return el
+ }
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+ for i, b := range bytes {
+ if err := ap.handle(b); err != nil {
+ return i, err
+ }
+ }
+
+ return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+ ap.context.currentChar = b
+ newState, err := ap.currState.Handle(b)
+ if err != nil {
+ return err
+ }
+
+ if newState == nil {
+ ap.logf("WARNING: newState is nil")
+ return errors.New("New state of 'nil' is invalid.")
+ }
+
+ if newState != ap.currState {
+ if err := ap.changeState(newState); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+ ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+ // Exit old state
+ if err := ap.currState.Exit(); err != nil {
+ ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+ return err
+ }
+
+ // Perform transition action
+ if err := ap.currState.Transition(newState); err != nil {
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+ return err
+ }
+
+ // Enter new state
+ if err := newState.Enter(); err != nil {
+ ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+ return err
+ }
+
+ ap.currState = newState
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 00000000..de0a1f9c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
+func parseParams(bytes []byte) ([]string, error) {
+ paramBuff := make([]byte, 0, 0)
+ params := []string{}
+
+ for _, v := range bytes {
+ if v == ';' {
+ if len(paramBuff) > 0 {
+ // Completed parameter, append it to the list
+ s := string(paramBuff)
+ params = append(params, s)
+ paramBuff = make([]byte, 0, 0)
+ }
+ } else {
+ paramBuff = append(paramBuff, v)
+ }
+ }
+
+ // Last parameter may not be terminated with ';'
+ if len(paramBuff) > 0 {
+ s := string(paramBuff)
+ params = append(params, s)
+ }
+
+ return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+ return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+ i := getInts(params, 1, dflt)[0]
+ return i
+}
+
+func getInts(params []string, minCount int, dflt int) []int {
+ ints := []int{}
+
+ for _, v := range params {
+ i, _ := strconv.Atoi(v)
+ // Zero is mapped to the default value in VT100.
+ if i == 0 {
+ i = dflt
+ }
+ ints = append(ints, i)
+ }
+
+ if len(ints) < minCount {
+ remaining := minCount - len(ints)
+ for i := 0; i < remaining; i++ {
+ ints = append(ints, dflt)
+ }
+ }
+
+ return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+ switch param {
+ case "?3":
+ return ap.eventHandler.DECCOLM(set)
+ case "?6":
+ return ap.eventHandler.DECOM(set)
+ case "?25":
+ return ap.eventHandler.DECTCEM(set)
+ }
+ return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+ if len(params) == 1 {
+ return ap.modeDispatch(params[0], true)
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+ if len(params) == 1 {
+ return ap.modeDispatch(params[0], false)
+ }
+
+ return nil
+}
+
+func getEraseParam(params []string) int {
+ param := getInt(params, 0)
+ if param < 0 || 3 < param {
+ param = 0
+ }
+
+ return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 00000000..0bb5e51e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+ currChar := ap.context.currentChar
+ ap.logf("collectParam %#x", currChar)
+ ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+ return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+ currChar := ap.context.currentChar
+ ap.logf("collectInter %#x", currChar)
+ ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+ return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+ cmd, _ := parseCmd(*ap.context)
+ intermeds := ap.context.interBuffer
+ ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+ ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+ switch cmd {
+ case "D": // IND
+ return ap.eventHandler.IND()
+ case "E": // NEL, equivalent to CRLF
+ err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+ if err == nil {
+ err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+ }
+ return err
+ case "M": // RI
+ return ap.eventHandler.RI()
+ }
+
+ return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+ cmd, _ := parseCmd(*ap.context)
+ params, _ := parseParams(ap.context.paramBuffer)
+ ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+ ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+ switch cmd {
+ case "@":
+ return ap.eventHandler.ICH(getInt(params, 1))
+ case "A":
+ return ap.eventHandler.CUU(getInt(params, 1))
+ case "B":
+ return ap.eventHandler.CUD(getInt(params, 1))
+ case "C":
+ return ap.eventHandler.CUF(getInt(params, 1))
+ case "D":
+ return ap.eventHandler.CUB(getInt(params, 1))
+ case "E":
+ return ap.eventHandler.CNL(getInt(params, 1))
+ case "F":
+ return ap.eventHandler.CPL(getInt(params, 1))
+ case "G":
+ return ap.eventHandler.CHA(getInt(params, 1))
+ case "H":
+ ints := getInts(params, 2, 1)
+ x, y := ints[0], ints[1]
+ return ap.eventHandler.CUP(x, y)
+ case "J":
+ param := getEraseParam(params)
+ return ap.eventHandler.ED(param)
+ case "K":
+ param := getEraseParam(params)
+ return ap.eventHandler.EL(param)
+ case "L":
+ return ap.eventHandler.IL(getInt(params, 1))
+ case "M":
+ return ap.eventHandler.DL(getInt(params, 1))
+ case "P":
+ return ap.eventHandler.DCH(getInt(params, 1))
+ case "S":
+ return ap.eventHandler.SU(getInt(params, 1))
+ case "T":
+ return ap.eventHandler.SD(getInt(params, 1))
+ case "c":
+ return ap.eventHandler.DA(params)
+ case "d":
+ return ap.eventHandler.VPA(getInt(params, 1))
+ case "f":
+ ints := getInts(params, 2, 1)
+ x, y := ints[0], ints[1]
+ return ap.eventHandler.HVP(x, y)
+ case "h":
+ return ap.hDispatch(params)
+ case "l":
+ return ap.lDispatch(params)
+ case "m":
+ return ap.eventHandler.SGR(getInts(params, 1, 0))
+ case "r":
+ ints := getInts(params, 2, 1)
+ top, bottom := ints[0], ints[1]
+ return ap.eventHandler.DECSTBM(top, bottom)
+ default:
+ ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
+ return nil
+ }
+
+}
+
+func (ap *AnsiParser) print() error {
+ return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+ ap.context = &ansiContext{}
+ return nil
+}
+
+func (ap *AnsiParser) execute() error {
+ return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 00000000..f2ea1fcd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+ Enter() error
+ Exit() error
+ Handle(byte) (state, error)
+ Name() string
+ Transition(state) error
+}
+
+type baseState struct {
+ name string
+ parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+ return nil
+}
+
+func (base baseState) Exit() error {
+ return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+ switch {
+ case b == CSI_ENTRY:
+ return base.parser.csiEntry, nil
+ case b == DCS_ENTRY:
+ return base.parser.dcsEntry, nil
+ case b == ANSI_ESCAPE_PRIMARY:
+ return base.parser.escape, nil
+ case b == OSC_STRING:
+ return base.parser.oscString, nil
+ case sliceContains(toGroundBytes, b):
+ return base.parser.ground, nil
+ }
+
+ return nil, nil
+}
+
+func (base baseState) Name() string {
+ return base.name
+}
+
+func (base baseState) Transition(s state) error {
+ if s == base.parser.ground {
+ execBytes := []byte{0x18}
+ execBytes = append(execBytes, 0x1A)
+ execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+ execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+ execBytes = append(execBytes, 0x99)
+ execBytes = append(execBytes, 0x9A)
+
+ if sliceContains(execBytes, base.parser.context.currentChar) {
+ return base.parser.execute()
+ }
+ }
+
+ return nil
+}
+
+type dcsEntryState struct {
+ baseState
+}
+
+type errorState struct {
+ baseState
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 00000000..39211449
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+ "strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+ for _, v := range bytes {
+ if v == b {
+ return true
+ }
+ }
+
+ return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+ s := string(bytes)
+ i, _ := strconv.Atoi(s)
+ return i
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 00000000..5599082a
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,196 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/Azure/go-ansiterm"
+ windows "golang.org/x/sys/windows"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+ VK_PRIOR = 0x21 // PAGE UP key
+ VK_NEXT = 0x22 // PAGE DOWN key
+ VK_END = 0x23 // END key
+ VK_HOME = 0x24 // HOME key
+ VK_LEFT = 0x25 // LEFT ARROW key
+ VK_UP = 0x26 // UP ARROW key
+ VK_RIGHT = 0x27 // RIGHT ARROW key
+ VK_DOWN = 0x28 // DOWN ARROW key
+ VK_SELECT = 0x29 // SELECT key
+ VK_PRINT = 0x2A // PRINT key
+ VK_EXECUTE = 0x2B // EXECUTE key
+ VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+ VK_INSERT = 0x2D // INS key
+ VK_DELETE = 0x2E // DEL key
+ VK_HELP = 0x2F // HELP key
+ VK_F1 = 0x70 // F1 key
+ VK_F2 = 0x71 // F2 key
+ VK_F3 = 0x72 // F3 key
+ VK_F4 = 0x73 // F4 key
+ VK_F5 = 0x74 // F5 key
+ VK_F6 = 0x75 // F6 key
+ VK_F7 = 0x76 // F7 key
+ VK_F8 = 0x77 // F8 key
+ VK_F9 = 0x78 // F9 key
+ VK_F10 = 0x79 // F10 key
+ VK_F11 = 0x7A // F11 key
+ VK_F12 = 0x7B // F12 key
+
+ RIGHT_ALT_PRESSED = 0x0001
+ LEFT_ALT_PRESSED = 0x0002
+ RIGHT_CTRL_PRESSED = 0x0004
+ LEFT_CTRL_PRESSED = 0x0008
+ SHIFT_PRESSED = 0x0010
+ NUMLOCK_ON = 0x0020
+ SCROLLLOCK_ON = 0x0040
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+)
+
+type ansiCommand struct {
+ CommandBytes []byte
+ Command string
+ Parameters []string
+ IsSpecial bool
+}
+
+func newAnsiCommand(command []byte) *ansiCommand {
+
+ if isCharacterSelectionCmdChar(command[1]) {
+ // Is Character Set Selection commands
+ return &ansiCommand{
+ CommandBytes: command,
+ Command: string(command),
+ IsSpecial: true,
+ }
+ }
+
+ // last char is command character
+ lastCharIndex := len(command) - 1
+
+ ac := &ansiCommand{
+ CommandBytes: command,
+ Command: string(command[lastCharIndex]),
+ IsSpecial: false,
+ }
+
+ // more than a single escape
+ if lastCharIndex != 0 {
+ start := 1
+ // skip if double char escape sequence
+ if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
+ start++
+ }
+ // convert this to GetNextParam method
+ ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
+ }
+
+ return ac
+}
+
+func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
+ if index < 0 || index >= len(ac.Parameters) {
+ return defaultValue
+ }
+
+ param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+ if err != nil {
+ return defaultValue
+ }
+
+ return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+ return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+ bytesToHex(ac.CommandBytes),
+ ac.Command,
+ strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+ switch {
+ case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+ return true
+ case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+ // non-CSI escape sequence terminator
+ return true
+ case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+ // String escape sequence terminator
+ return true
+ }
+ return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+ return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+ return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+ hex := make([]string, len(b))
+ for i, ch := range b {
+ hex[i] = fmt.Sprintf("%X", ch)
+ }
+ return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+ if n < min {
+ return min
+ } else if n > max {
+ return max
+ } else {
+ return n
+ }
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+ var file *os.File
+
+ // syscall uses negative numbers
+ // windows package uses very big uint32
+ // Keep these switches split so we don't have to convert ints too much.
+ switch uint32(nFile) {
+ case windows.STD_INPUT_HANDLE:
+ file = os.Stdin
+ case windows.STD_OUTPUT_HANDLE:
+ file = os.Stdout
+ case windows.STD_ERROR_HANDLE:
+ file = os.Stderr
+ default:
+ switch nFile {
+ case syscall.STD_INPUT_HANDLE:
+ file = os.Stdin
+ case syscall.STD_OUTPUT_HANDLE:
+ file = os.Stdout
+ case syscall.STD_ERROR_HANDLE:
+ file = os.Stderr
+ default:
+ panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+ }
+ }
+
+ fd, err := syscall.GetStdHandle(nFile)
+ if err != nil {
+ panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
+ }
+
+ return file, uintptr(fd)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 00000000..6055e33b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,327 @@
+// +build windows
+
+package winterm
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+// variables) the pointers reference *before* the API completes.
+//
+// As a result, in those cases, the code must hint that the variables remain active by invoking the
+// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+// require unsafe pointers.
+//
+// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+// the garbage collector the variables remain in use if:
+//
+// -- The value is not a pointer (e.g., int32, struct)
+// -- The value is not referenced by the method after passing the pointer to Windows
+//
+// See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+ kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+ getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
+ setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
+ setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
+ setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
+ getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+ setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+ scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+ setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
+ setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
+ writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
+ readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
+ waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+ // Console modes
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+ ENABLE_PROCESSED_INPUT = 0x0001
+ ENABLE_LINE_INPUT = 0x0002
+ ENABLE_ECHO_INPUT = 0x0004
+ ENABLE_WINDOW_INPUT = 0x0008
+ ENABLE_MOUSE_INPUT = 0x0010
+ ENABLE_INSERT_MODE = 0x0020
+ ENABLE_QUICK_EDIT_MODE = 0x0040
+ ENABLE_EXTENDED_FLAGS = 0x0080
+ ENABLE_AUTO_POSITION = 0x0100
+ ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+ ENABLE_PROCESSED_OUTPUT = 0x0001
+ ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+ DISABLE_NEWLINE_AUTO_RETURN = 0x0008
+ ENABLE_LVB_GRID_WORLDWIDE = 0x0010
+
+ // Character attributes
+ // Note:
+ // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+ // Clearing all foreground or background colors results in black; setting all creates white.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+ FOREGROUND_BLUE uint16 = 0x0001
+ FOREGROUND_GREEN uint16 = 0x0002
+ FOREGROUND_RED uint16 = 0x0004
+ FOREGROUND_INTENSITY uint16 = 0x0008
+ FOREGROUND_MASK uint16 = 0x000F
+
+ BACKGROUND_BLUE uint16 = 0x0010
+ BACKGROUND_GREEN uint16 = 0x0020
+ BACKGROUND_RED uint16 = 0x0040
+ BACKGROUND_INTENSITY uint16 = 0x0080
+ BACKGROUND_MASK uint16 = 0x00F0
+
+ COMMON_LVB_MASK uint16 = 0xFF00
+ COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+ COMMON_LVB_UNDERSCORE uint16 = 0x8000
+
+ // Input event types
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ KEY_EVENT = 0x0001
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+ MENU_EVENT = 0x0008
+ FOCUS_EVENT = 0x0010
+
+ // WaitForSingleObject return codes
+ WAIT_ABANDONED = 0x00000080
+ WAIT_FAILED = 0xFFFFFFFF
+ WAIT_SIGNALED = 0x0000000
+ WAIT_TIMEOUT = 0x00000102
+
+ // WaitForSingleObject wait duration
+ WAIT_INFINITE = 0xFFFFFFFF
+ WAIT_ONE_SECOND = 1000
+ WAIT_HALF_SECOND = 500
+ WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+ CHAR_INFO struct {
+ UnicodeChar uint16
+ Attributes uint16
+ }
+
+ CONSOLE_CURSOR_INFO struct {
+ Size uint32
+ Visible int32
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO struct {
+ Size COORD
+ CursorPosition COORD
+ Attributes uint16
+ Window SMALL_RECT
+ MaximumWindowSize COORD
+ }
+
+ COORD struct {
+ X int16
+ Y int16
+ }
+
+ SMALL_RECT struct {
+ Left int16
+ Top int16
+ Right int16
+ Bottom int16
+ }
+
+ // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case; it is also the largest.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+ INPUT_RECORD struct {
+ EventType uint16
+ KeyEvent KEY_EVENT_RECORD
+ }
+
+ KEY_EVENT_RECORD struct {
+ KeyDown int32
+ RepeatCount uint16
+ VirtualKeyCode uint16
+ VirtualScanCode uint16
+ UnicodeChar uint16
+ ControlKeyState uint32
+ }
+
+ WINDOW_BUFFER_SIZE struct {
+ Size COORD
+ }
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+ if f {
+ return int32(1)
+ } else {
+ return int32(0)
+ }
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+ r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+ r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+ r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+ use(coord)
+ return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+ err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+ return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error {
+ r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
+ use(mode)
+ return checkError(r1, r2, err)
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+ info := CONSOLE_SCREEN_BUFFER_INFO{}
+ err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
+ if err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
+ r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
+ use(scrollRect)
+ use(clipRect)
+ use(destOrigin)
+ use(char)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleScreenBufferSize sets the size of the console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
+func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
+ r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
+ use(coord)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
+func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
+ r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
+ use(attribute)
+ return checkError(r1, r2, err)
+}
+
+// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
+// Note that the size and location must be within and no larger than the backing console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
+ r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
+ use(isAbsolute)
+ use(rect)
+ return checkError(r1, r2, err)
+}
+
+// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
+func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
+ r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
+ use(buffer)
+ use(bufferSize)
+ use(bufferCoord)
+ return checkError(r1, r2, err)
+}
+
+// ReadConsoleInput reads (and removes) data from the console input buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
+ r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
+ use(buffer)
+ return checkError(r1, r2, err)
+}
+
+// WaitForSingleObject waits for the passed handle to be signaled.
+// It returns true if the handle was signaled; false otherwise.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
+ r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
+ switch r1 {
+ case WAIT_ABANDONED, WAIT_TIMEOUT:
+ return false, nil
+ case WAIT_SIGNALED:
+ return true, nil
+ }
+ use(msWait)
+ return false, err
+}
+
+// String helpers
+func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
+ return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
+}
+
+func (coord COORD) String() string {
+ return fmt.Sprintf("%v,%v", coord.X, coord.Y)
+}
+
+func (rect SMALL_RECT) String() string {
+ return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
+}
+
+// checkError evaluates the results of a Windows API call and returns the error if it failed.
+func checkError(r1, r2 uintptr, err error) error {
+ // Windows APIs return non-zero to indicate success
+ if r1 != 0 {
+ return nil
+ }
+
+ // Return the error if provided, otherwise default to EINVAL
+ if err != nil {
+ return err
+ }
+ return syscall.EINVAL
+}
+
+// coordToPointer converts a COORD into a uintptr (by fooling the type system).
+func coordToPointer(c COORD) uintptr {
+ // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
+ return uintptr(*((*uint32)(unsafe.Pointer(&c))))
+}
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+func use(p interface{}) {}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
new file mode 100644
index 00000000..cbec8f72
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+const (
+ FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+ BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+)
+
+// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
+// request represented by the passed ANSI mode.
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
+ switch ansiMode {
+
+ // Mode styles
+ case ansiterm.ANSI_SGR_BOLD:
+ windowsMode = windowsMode | FOREGROUND_INTENSITY
+
+ case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
+ windowsMode &^= FOREGROUND_INTENSITY
+
+ case ansiterm.ANSI_SGR_UNDERLINE:
+ windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
+
+ case ansiterm.ANSI_SGR_REVERSE:
+ inverted = true
+
+ case ansiterm.ANSI_SGR_REVERSE_OFF:
+ inverted = false
+
+ case ansiterm.ANSI_SGR_UNDERLINE_OFF:
+ windowsMode &^= COMMON_LVB_UNDERSCORE
+
+ // Foreground colors
+ case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
+ windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
+
+ case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
+
+ case ansiterm.ANSI_SGR_FOREGROUND_RED:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
+
+ case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
+ windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+ // Background colors
+ case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
+ // Black with no intensity
+ windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
+
+ case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
+
+ case ansiterm.ANSI_SGR_BACKGROUND_RED:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
+
+ case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
+
+ case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
+
+ case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
+ windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+ }
+
+ return windowsMode, inverted
+}
+
+// invertAttributes inverts the foreground and background colors of a Windows attributes value
+func invertAttributes(windowsMode uint16) uint16 {
+ return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
new file mode 100644
index 00000000..3ee06ea7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
+const (
+ horizontal = iota
+ vertical
+)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
+ if h.originMode {
+ sr := h.effectiveSr(info.Window)
+ return SMALL_RECT{
+ Top: sr.top,
+ Bottom: sr.bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ } else {
+ return SMALL_RECT{
+ Top: info.Window.Top,
+ Bottom: info.Window.Bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+ }
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+ position.X = ensureInRange(position.X, window.Left, window.Right)
+ position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+ err := SetConsoleCursorPosition(h.fd, position)
+ if err != nil {
+ return err
+ }
+ h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
+ return err
+}
+
+func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
+ return h.moveCursor(vertical, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
+ return h.moveCursor(horizontal, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ switch moveMode {
+ case horizontal:
+ position.X += int16(param)
+ case vertical:
+ position.Y += int16(param)
+ }
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = 0
+ position.Y += int16(param)
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ position := info.CursorPosition
+ position.X = int16(param) - 1
+
+ if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
new file mode 100644
index 00000000..244b5fa2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
+ // Ignore an invalid (negative area) request
+ if toCoord.Y < fromCoord.Y {
+ return nil
+ }
+
+ var err error
+
+ var coordStart = COORD{}
+ var coordEnd = COORD{}
+
+ xCurrent, yCurrent := fromCoord.X, fromCoord.Y
+ xEnd, yEnd := toCoord.X, toCoord.Y
+
+ // Clear any partial initial line
+ if xCurrent > 0 {
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yCurrent
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ xCurrent = 0
+ yCurrent += 1
+ }
+
+ // Clear intervening rectangular section
+ if yCurrent < yEnd {
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yEnd-1
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ xCurrent = 0
+ yCurrent = yEnd
+ }
+
+ // Clear remaining partial ending line
+ coordStart.X, coordStart.Y = xCurrent, yCurrent
+ coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+ err = h.clearRect(attributes, coordStart, coordEnd)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+ region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+ width := toCoord.X - fromCoord.X + 1
+ height := toCoord.Y - fromCoord.Y + 1
+ size := uint32(width) * uint32(height)
+
+ if size <= 0 {
+ return nil
+ }
+
+ buffer := make([]CHAR_INFO, size)
+
+ char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+ for i := 0; i < int(size); i++ {
+ buffer[i] = char
+ }
+
+ err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 00000000..2d27fa1d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+ top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+ bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+ if top >= bottom {
+ top = window.Top
+ bottom = window.Bottom
+ }
+ return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ sr := h.effectiveSr(info.Window)
+ return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+ return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ start := info.CursorPosition.Y
+ sr := h.effectiveSr(info.Window)
+ // Lines cannot be inserted or deleted outside the scrolling region.
+ if start >= sr.top && start <= sr.bottom {
+ sr.top = start
+ return h.scroll(param, sr, info)
+ } else {
+ return nil
+ }
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+ return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+ h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+ h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+ // Copy from and clip to the scroll region (full buffer width)
+ scrollRect := SMALL_RECT{
+ Top: sr.top,
+ Bottom: sr.bottom,
+ Left: 0,
+ Right: info.Size.X - 1,
+ }
+
+ // Origin to which area should be copied
+ destOrigin := COORD{
+ X: 0,
+ Y: sr.top - int16(param),
+ }
+
+ char := CHAR_INFO{
+ UnicodeChar: ' ',
+ Attributes: h.attributes,
+ }
+
+ if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+ return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+ // Copy from and clip to the scroll region (full buffer width)
+ scrollRect := SMALL_RECT{
+ Top: position.Y,
+ Bottom: position.Y,
+ Left: position.X,
+ Right: info.Size.X - 1,
+ }
+
+ // Origin to which area should be copied
+ destOrigin := COORD{
+ X: position.X - int16(columns),
+ Y: position.Y,
+ }
+
+ char := CHAR_INFO{
+ UnicodeChar: ' ',
+ Attributes: h.attributes,
+ }
+
+ if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 00000000..afa7635d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the result
+// always remains within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 {
+ return ensureInRange(n+increment, min, max)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
new file mode 100644
index 00000000..2d40fb75
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -0,0 +1,743 @@
+// +build windows
+
+package winterm
+
+import (
+ "bytes"
+ "log"
+ "os"
+ "strconv"
+
+ "github.com/Azure/go-ansiterm"
+)
+
+type windowsAnsiEventHandler struct {
+ fd uintptr
+ file *os.File
+ infoReset *CONSOLE_SCREEN_BUFFER_INFO
+ sr scrollRegion
+ buffer bytes.Buffer
+ attributes uint16
+ inverted bool
+ wrapNext bool
+ drewMarginByte bool
+ originMode bool
+ marginByte byte
+ curInfo *CONSOLE_SCREEN_BUFFER_INFO
+ curPos COORD
+ logf func(string, ...interface{})
+}
+
+type Option func(*windowsAnsiEventHandler)
+
+func WithLogf(f func(string, ...interface{})) Option {
+ return func(w *windowsAnsiEventHandler) {
+ w.logf = f
+ }
+}
+
+func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
+ infoReset, err := GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ h := &windowsAnsiEventHandler{
+ fd: fd,
+ file: file,
+ infoReset: infoReset,
+ attributes: infoReset.Attributes,
+ }
+ for _, o := range opts {
+ o(h)
+ }
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ := os.Create("winEventHandler.log")
+ logger := log.New(logFile, "", log.LstdFlags)
+ if h.logf != nil {
+ l := h.logf
+ h.logf = func(s string, v ...interface{}) {
+ l(s, v...)
+ logger.Printf(s, v...)
+ }
+ } else {
+ h.logf = logger.Printf
+ }
+ }
+
+ if h.logf == nil {
+ h.logf = func(string, ...interface{}) {}
+ }
+
+ return h
+}
+
+type scrollRegion struct {
+ top int16
+ bottom int16
+}
+
+// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
+// current cursor position and scroll region settings, in which case it returns
+// true. If no special handling is necessary, then it does nothing and returns
+// false.
+//
+// In the false case, the caller should ensure that a carriage return
+// and line feed are inserted or that the text is otherwise wrapped.
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ h.clearWrap()
+ }
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return false, err
+ }
+ sr := h.effectiveSr(info.Window)
+ if pos.Y == sr.bottom {
+ // Scrolling is necessary. Let Windows automatically scroll if the scrolling region
+ // is the full window.
+ if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
+ if includeCR {
+ pos.X = 0
+ h.updatePos(pos)
+ }
+ return false, nil
+ }
+
+ // A custom scroll region is active. Scroll the window manually to simulate
+ // the LF.
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ h.logf("Simulating LF inside scroll region")
+ if err := h.scrollUp(1); err != nil {
+ return false, err
+ }
+ if includeCR {
+ pos.X = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+
+ } else if pos.Y < info.Window.Bottom {
+ // Let Windows handle the LF.
+ pos.Y++
+ if includeCR {
+ pos.X = 0
+ }
+ h.updatePos(pos)
+ return false, nil
+ } else {
+ // The cursor is at the bottom of the screen but outside the scroll
+ // region. Skip the LF.
+ h.logf("Simulating LF outside scroll region")
+ if includeCR {
+ if err := h.Flush(); err != nil {
+ return false, err
+ }
+ pos.X = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return false, err
+ }
+ }
+ return true, nil
+ }
+}
+
+// executeLF executes a LF without a CR.
+func (h *windowsAnsiEventHandler) executeLF() error {
+ handled, err := h.simulateLF(false)
+ if err != nil {
+ return err
+ }
+ if !handled {
+ // Windows LF will reset the cursor column position. Write the LF
+ // and restore the cursor position.
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+ if pos.X != 0 {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("Resetting cursor position for LF without CR")
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) Print(b byte) error {
+ if h.wrapNext {
+ h.buffer.WriteByte(h.marginByte)
+ h.clearWrap()
+ if _, err := h.simulateLF(true); err != nil {
+ return err
+ }
+ }
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X == info.Size.X-1 {
+ h.wrapNext = true
+ h.marginByte = b
+ } else {
+ pos.X++
+ h.updatePos(pos)
+ h.buffer.WriteByte(b)
+ }
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) Execute(b byte) error {
+ switch b {
+ case ansiterm.ANSI_TAB:
+ h.logf("Execute(TAB)")
+ // Move to the next tab stop, but preserve auto-wrap if already set.
+ if !h.wrapNext {
+ pos, info, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ pos.X = (pos.X + 8) - pos.X%8
+ if pos.X >= info.Size.X {
+ pos.X = info.Size.X - 1
+ }
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case ansiterm.ANSI_BEL:
+ h.buffer.WriteByte(ansiterm.ANSI_BEL)
+ return nil
+
+ case ansiterm.ANSI_BACKSPACE:
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.clearWrap()
+ }
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X > 0 {
+ pos.X--
+ h.updatePos(pos)
+ h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
+ }
+ return nil
+
+ case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
+ // Treat as true LF.
+ return h.executeLF()
+
+ case ansiterm.ANSI_LINE_FEED:
+ // Simulate a CR and LF for now since there is no way in go-ansiterm
+ // to tell if the LF should include CR (and more things break when it's
+ // missing than when it's incorrectly added).
+ handled, err := h.simulateLF(true)
+ if handled || err != nil {
+ return err
+ }
+ return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+
+ case ansiterm.ANSI_CARRIAGE_RETURN:
+ if h.wrapNext {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.clearWrap()
+ }
+ pos, _, err := h.getCurrentInfo()
+ if err != nil {
+ return err
+ }
+ if pos.X != 0 {
+ pos.X = 0
+ h.updatePos(pos)
+ h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
+ }
+ return nil
+
+ default:
+ return nil
+ }
+}
+
+func (h *windowsAnsiEventHandler) CUU(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorVertical(-param)
+}
+
+func (h *windowsAnsiEventHandler) CUD(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorVertical(param)
+}
+
+func (h *windowsAnsiEventHandler) CUF(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorHorizontal(param)
+}
+
+func (h *windowsAnsiEventHandler) CUB(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorHorizontal(-param)
+}
+
+func (h *windowsAnsiEventHandler) CNL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorLine(param)
+}
+
+func (h *windowsAnsiEventHandler) CPL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorLine(-param)
+}
+
+func (h *windowsAnsiEventHandler) CHA(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.moveCursorColumn(param)
+}
+
+func (h *windowsAnsiEventHandler) VPA(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("VPA: [[%d]]", param)
+ h.clearWrap()
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ window := h.getCursorWindow(info)
+ position := info.CursorPosition
+ position.Y = window.Top + int16(param) - 1
+ return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("CUP: [[%d %d]]", row, col)
+ h.clearWrap()
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ window := h.getCursorWindow(info)
+ position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
+ return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("HVP: [[%d %d]]", row, col)
+ h.clearWrap()
+ return h.CUP(row, col)
+}
+
+func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+ h.clearWrap()
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+ h.clearWrap()
+ h.originMode = enable
+ return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+ h.clearWrap()
+ if err := h.ED(2); err != nil {
+ return err
+ }
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+ targetWidth := int16(80)
+ if use132 {
+ targetWidth = 132
+ }
+ if info.Size.X < targetWidth {
+ if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+ h.logf("set buffer failed: %v", err)
+ return err
+ }
+ }
+ window := info.Window
+ window.Left = 0
+ window.Right = targetWidth - 1
+ if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+ h.logf("set window failed: %v", err)
+ return err
+ }
+ if info.Size.X > targetWidth {
+ if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+ h.logf("set buffer failed: %v", err)
+ return err
+ }
+ }
+ return SetConsoleCursorPosition(h.fd, COORD{0, 0})
+}
+
+func (h *windowsAnsiEventHandler) ED(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("ED: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+
+ // [J -- Erases from the cursor to the end of the screen, including the cursor position.
+ // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
+ // [2J -- Erases the complete display. The cursor does not move.
+ // Notes:
+ // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ var start COORD
+ var end COORD
+
+ switch param {
+ case 0:
+ start = info.CursorPosition
+ end = COORD{info.Size.X - 1, info.Size.Y - 1}
+
+ case 1:
+ start = COORD{0, 0}
+ end = info.CursorPosition
+
+ case 2:
+ start = COORD{0, 0}
+ end = COORD{info.Size.X - 1, info.Size.Y - 1}
+ }
+
+ err = h.clearRange(h.attributes, start, end)
+ if err != nil {
+ return err
+ }
+
+ // If the whole buffer was cleared, move the window to the top while preserving
+ // the window-relative cursor position.
+ if param == 2 {
+ pos := info.CursorPosition
+ window := info.Window
+ pos.Y -= window.Top
+ window.Bottom -= window.Top
+ window.Top = 0
+ if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+ return err
+ }
+ if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) EL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("EL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+
+ // [K -- Erases from the cursor to the end of the line, including the cursor position.
+ // [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
+ // [2K -- Erases the complete line.
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ var start COORD
+ var end COORD
+
+ switch param {
+ case 0:
+ start = info.CursorPosition
+ end = COORD{info.Size.X, info.CursorPosition.Y}
+
+ case 1:
+ start = COORD{0, info.CursorPosition.Y}
+ end = info.CursorPosition
+
+ case 2:
+ start = COORD{0, info.CursorPosition.Y}
+ end = COORD{info.Size.X, info.CursorPosition.Y}
+ }
+
+ err = h.clearRange(h.attributes, start, end)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) IL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("IL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.insertLines(param)
+}
+
+func (h *windowsAnsiEventHandler) DL(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DL: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.deleteLines(param)
+}
+
+func (h *windowsAnsiEventHandler) ICH(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("ICH: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.insertCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) DCH(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DCH: [%v]", strconv.Itoa(param))
+ h.clearWrap()
+ return h.deleteCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ strings := []string{}
+ for _, v := range params {
+ strings = append(strings, strconv.Itoa(v))
+ }
+
+ h.logf("SGR: [%v]", strings)
+
+ if len(params) <= 0 {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ } else {
+ for _, attr := range params {
+
+ if attr == ansiterm.ANSI_SGR_RESET {
+ h.attributes = h.infoReset.Attributes
+ h.inverted = false
+ continue
+ }
+
+ h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+ }
+ }
+
+ attributes := h.attributes
+ if h.inverted {
+ attributes = invertAttributes(attributes)
+ }
+ err := SetConsoleTextAttribute(h.fd, attributes)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("SU: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("SD: [%v]", []string{strconv.Itoa(param)})
+ h.clearWrap()
+ return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+ h.logf("DA: [%v]", params)
+ // DA cannot be implemented because it must send data on the VT100 input stream,
+ // which is not available to go-ansiterm.
+ return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("DECSTBM: [%d, %d]", top, bottom)
+
+ // Windows is 0 indexed, Linux is 1 indexed
+ h.sr.top = int16(top - 1)
+ h.sr.bottom = int16(bottom - 1)
+
+ // This command also moves the cursor to the origin.
+ h.clearWrap()
+ return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+ if err := h.Flush(); err != nil {
+ return err
+ }
+ h.logf("RI: []")
+ h.clearWrap()
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ sr := h.effectiveSr(info.Window)
+ if info.CursorPosition.Y == sr.top {
+ return h.scrollDown(1)
+ }
+
+ return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+ h.logf("IND: []")
+ return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+ h.curInfo = nil
+ if h.buffer.Len() > 0 {
+ h.logf("Flush: [%s]", h.buffer.Bytes())
+ if _, err := h.buffer.WriteTo(h.file); err != nil {
+ return err
+ }
+ }
+
+ if h.wrapNext && !h.drewMarginByte {
+ h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return err
+ }
+
+ charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+ size := COORD{1, 1}
+ position := COORD{0, 0}
+ region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+ if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+ return err
+ }
+ h.drewMarginByte = true
+ }
+ return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+ if h.curInfo == nil {
+ info, err := GetConsoleScreenBufferInfo(h.fd)
+ if err != nil {
+ return COORD{}, nil, err
+ }
+ h.curInfo = info
+ h.curPos = info.CursorPosition
+ }
+ return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+ if h.curInfo == nil {
+ panic("failed to call getCurrentInfo before calling updatePos")
+ }
+ h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+ h.wrapNext = false
+ h.drewMarginByte = false
+}
diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes
new file mode 100644
index 00000000..94f480de
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitattributes
@@ -0,0 +1 @@
+* text=auto eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 00000000..815e2066
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1,10 @@
+.vscode/
+
+*.exe
+
+# testing
+testdata
+
+# go workspaces
+go.work
+go.work.sum
diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml
new file mode 100644
index 00000000..faedfe93
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml
@@ -0,0 +1,147 @@
+linters:
+ enable:
+ # style
+ - containedctx # struct contains a context
+ - dupl # duplicate code
+ - errname # errors are named correctly
+ - nolintlint # "//nolint" directives are properly explained
+ - revive # golint replacement
+ - unconvert # unnecessary conversions
+ - wastedassign
+
+ # bugs, performance, unused, etc ...
+ - contextcheck # function uses a non-inherited context
+ - errorlint # errors not wrapped for 1.13
+ - exhaustive # check exhaustiveness of enum switch statements
+ - gofmt # files are gofmt'ed
+ - gosec # security
+ - nilerr # returns nil even with non-nil error
+ - thelper # test helpers without t.Helper()
+ - unparam # unused function params
+
+issues:
+ exclude-dirs:
+ - pkg/etw/sample
+
+ exclude-rules:
+ # err is very often shadowed in nested scopes
+ - linters:
+ - govet
+ text: '^shadow: declaration of "err" shadows declaration'
+
+ # ignore long lines for skip autogen directives
+ - linters:
+ - revive
+ text: "^line-length-limit: "
+ source: "^//(go:generate|sys) "
+
+ #TODO: remove after upgrading to go1.18
+ # ignore comment spacing for nolint and sys directives
+ - linters:
+ - revive
+ text: "^comment-spacings: no space between comment delimiter and comment text"
+ source: "//(cspell:|nolint:|sys |todo)"
+
+ # not on go 1.18 yet, so no any
+ - linters:
+ - revive
+ text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
+ # allow unjustified ignores of error checks in defer statements
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errcheck` should provide explanation"
+ source: '^\s*defer '
+
+ # allow unjustified ignores of error lints for io.EOF
+ - linters:
+ - nolintlint
+ text: "^directive `//nolint:errorlint` should provide explanation"
+ source: '[=|!]= io.EOF'
+
+
+linters-settings:
+ exhaustive:
+ default-signifies-exhaustive: true
+ govet:
+ enable-all: true
+ disable:
+ # struct order is often for Win32 compat
+ # also, ignore pointer bytes/GC issues for now until performance becomes an issue
+ - fieldalignment
+ nolintlint:
+ require-explanation: true
+ require-specific: true
+ revive:
+ # revive is more configurable than static check, so likely the preferred alternative to static-check
+ # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997)
+ enable-all-rules:
+ true
+ # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
+ rules:
+ # rules with required arguments
+ - name: argument-limit
+ disabled: true
+ - name: banned-characters
+ disabled: true
+ - name: cognitive-complexity
+ disabled: true
+ - name: cyclomatic
+ disabled: true
+ - name: file-header
+ disabled: true
+ - name: function-length
+ disabled: true
+ - name: function-result-limit
+ disabled: true
+ - name: max-public-structs
+ disabled: true
+ # generally annoying rules
+ - name: add-constant # complains about any and all strings and integers
+ disabled: true
+ - name: confusing-naming # we frequently use "Foo()" and "foo()" together
+ disabled: true
+ - name: flag-parameter # excessive, and a common idiom we use
+ disabled: true
+ - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+ disabled: true
+ # general config
+ - name: line-length-limit
+ arguments:
+ - 140
+ - name: var-naming
+ arguments:
+ - []
+ - - CID
+ - CRI
+ - CTRD
+ - DACL
+ - DLL
+ - DOS
+ - ETW
+ - FSCTL
+ - GCS
+ - GMSA
+ - HCS
+ - HV
+ - IO
+ - LCOW
+ - LDAP
+ - LPAC
+ - LTSC
+ - MMIO
+ - NT
+ - OCI
+ - PMEM
+ - PWSH
+ - RX
+ - SACl
+ - SID
+ - SMB
+ - TX
+ - VHD
+ - VHDX
+ - VMID
+ - VPCI
+ - WCOW
+ - WIM
diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
new file mode 100644
index 00000000..ae1b4942
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS
@@ -0,0 +1 @@
+ * @microsoft/containerplat
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 00000000..b8b569d7
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md
new file mode 100644
index 00000000..7474b4f0
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/README.md
@@ -0,0 +1,89 @@
+# go-winio [Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml)
+
+This repository contains utilities for efficiently performing Win32 IO operations in
+Go. Currently, this is focused on accessing named pipes and other file handles, and
+for using named pipes as a net transport.
+
+This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
+to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
+newer operating systems. This is similar to the implementation of network sockets in Go's net
+package.
+
+Please see the LICENSE file for licensing information.
+
+## Contributing
+
+This project welcomes contributions and suggestions.
+Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that
+you have the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [Microsoft CLA](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to
+provide a CLA and decorate the PR appropriately (e.g., label, comment).
+Simply follow the instructions provided by the bot.
+You will only need to do this once across all repos using our CLA.
+
+Additionally, the pull request pipeline requires the following steps to be performed before
+merging.
+
+### Code Sign-Off
+
+We require that contributors sign their commits using [`git commit --signoff`][git-commit-s]
+to certify they either authored the work themselves or otherwise have permission to use it in this project.
+
+A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].
+
+Please see [the developer certificate](https://developercertificate.org) for more info,
+as well as to make sure that you can attest to the rules listed.
+Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+### Linting
+
+Code must pass a linting stage, which uses [`golangci-lint`][lint].
+The linting settings are stored in [`.golangci.yml`](./.golangci.yml), and can be run
+automatically with VSCode by adding the following to your workspace or folder settings:
+
+```json
+ "go.lintTool": "golangci-lint",
+ "go.lintOnSave": "package",
+```
+
+Additional editor [integrations options are also available][lint-ide].
+
+Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root:
+
+```shell
+# use . or specify a path to only lint a package
+# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
+> golangci-lint run ./...
+```
+
+### Go Generate
+
+The pipeline checks that auto-generated code, via `go generate`, is up to date.
+
+This can be done for the entire repo:
+
+```shell
+> go generate ./...
+```
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## Special Thanks
+
+Thanks to [natefinch][natefinch] for the inspiration for this library.
+See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation.
+
+[lint]: https://golangci-lint.run/
+[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
+[lint-install]: https://golangci-lint.run/usage/install/#local-installation
+
+[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
+[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
+
+[natefinch]: https://github.com/natefinch
diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md
new file mode 100644
index 00000000..869fdfe2
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go
new file mode 100644
index 00000000..b54341da
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/backup.go
@@ -0,0 +1,287 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "unicode/utf16"
+
+ "github.com/Microsoft/go-winio/internal/fs"
+ "golang.org/x/sys/windows"
+)
+
+//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
+//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
+
+const (
+ BackupData = uint32(iota + 1)
+ BackupEaData
+ BackupSecurity
+ BackupAlternateData
+ BackupLink
+ BackupPropertyData
+ BackupObjectId //revive:disable-line:var-naming ID, not Id
+ BackupReparseData
+ BackupSparseBlock
+ BackupTxfsData
+)
+
+const (
+ StreamSparseAttributes = uint32(8)
+)
+
+//nolint:revive // var-naming: ALL_CAPS
+const (
+ WRITE_DAC = windows.WRITE_DAC
+ WRITE_OWNER = windows.WRITE_OWNER
+ ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY
+)
+
+// BackupHeader represents a backup stream of a file.
+type BackupHeader struct {
+ //revive:disable-next-line:var-naming ID, not Id
+ Id uint32 // The backup stream ID
+ Attributes uint32 // Stream attributes
+ Size int64 // The size of the stream in bytes
+ Name string // The name of the stream (for BackupAlternateData only).
+ Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
+}
+
+type win32StreamID struct {
+ StreamID uint32
+ Attributes uint32
+ Size uint64
+ NameSize uint32
+}
+
+// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
+// of BackupHeader values.
+type BackupStreamReader struct {
+ r io.Reader
+ bytesLeft int64
+}
+
+// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
+ return &BackupStreamReader{r, 0}
+}
+
+// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
+// it was not completely read.
+func (r *BackupStreamReader) Next() (*BackupHeader, error) {
+ if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this
+ if s, ok := r.r.(io.Seeker); ok {
+ // Verify that a no-op Seek on io.SeekCurrent succeeds
+ // before attempting the actual seek.
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil {
+ if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
+ return nil, err
+ }
+ r.bytesLeft = 0
+ }
+ }
+ if _, err := io.Copy(io.Discard, r); err != nil {
+ return nil, err
+ }
+ }
+ var wsi win32StreamID
+ if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
+ return nil, err
+ }
+ hdr := &BackupHeader{
+ Id: wsi.StreamID,
+ Attributes: wsi.Attributes,
+ Size: int64(wsi.Size),
+ }
+ if wsi.NameSize != 0 {
+ name := make([]uint16, int(wsi.NameSize/2))
+ if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
+ return nil, err
+ }
+ hdr.Name = windows.UTF16ToString(name)
+ }
+ if wsi.StreamID == BackupSparseBlock {
+ if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
+ return nil, err
+ }
+ hdr.Size -= 8
+ }
+ r.bytesLeft = hdr.Size
+ return hdr, nil
+}
+
+// Read reads from the current backup stream.
+func (r *BackupStreamReader) Read(b []byte) (int, error) {
+ if r.bytesLeft == 0 {
+ return 0, io.EOF
+ }
+ if int64(len(b)) > r.bytesLeft {
+ b = b[:r.bytesLeft]
+ }
+ n, err := r.r.Read(b)
+ r.bytesLeft -= int64(n)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ } else if r.bytesLeft == 0 && err == nil {
+ err = io.EOF
+ }
+ return n, err
+}
+
+// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
+type BackupStreamWriter struct {
+ w io.Writer
+ bytesLeft int64
+}
+
+// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
+func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
+ return &BackupStreamWriter{w, 0}
+}
+
+// WriteHeader writes the next backup stream header and prepares for calls to Write().
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
+ if w.bytesLeft != 0 {
+ return fmt.Errorf("missing %d bytes", w.bytesLeft)
+ }
+ name := utf16.Encode([]rune(hdr.Name))
+ wsi := win32StreamID{
+ StreamID: hdr.Id,
+ Attributes: hdr.Attributes,
+ Size: uint64(hdr.Size),
+ NameSize: uint32(len(name) * 2),
+ }
+ if hdr.Id == BackupSparseBlock {
+ // Include space for the int64 block offset
+ wsi.Size += 8
+ }
+ if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
+ return err
+ }
+ if len(name) != 0 {
+ if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
+ return err
+ }
+ }
+ if hdr.Id == BackupSparseBlock {
+ if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
+ return err
+ }
+ }
+ w.bytesLeft = hdr.Size
+ return nil
+}
+
+// Write writes to the current backup stream.
+func (w *BackupStreamWriter) Write(b []byte) (int, error) {
+ if w.bytesLeft < int64(len(b)) {
+ return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
+ }
+ n, err := w.w.Write(b)
+ w.bytesLeft -= int64(n)
+ return n, err
+}
+
+// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
+type BackupFileReader struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
+// Read will attempt to read the security descriptor of the file.
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
+ r := &BackupFileReader{f, includeSecurity, 0}
+ return r
+}
+
+// Read reads a backup stream from the file by calling the Win32 API BackupRead().
+func (r *BackupFileReader) Read(b []byte) (int, error) {
+ var bytesRead uint32
+ err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
+ if err != nil {
+ return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err}
+ }
+ runtime.KeepAlive(r.f)
+ if bytesRead == 0 {
+ return 0, io.EOF
+ }
+ return int(bytesRead), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileReader. It does not close
+// the underlying file.
+func (r *BackupFileReader) Close() error {
+ if r.ctx != 0 {
+ _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
+ runtime.KeepAlive(r.f)
+ r.ctx = 0
+ }
+ return nil
+}
+
+// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
+type BackupFileWriter struct {
+ f *os.File
+ includeSecurity bool
+ ctx uintptr
+}
+
+// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
+// Write() will attempt to restore the security descriptor from the stream.
+func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
+ w := &BackupFileWriter{f, includeSecurity, 0}
+ return w
+}
+
+// Write restores a portion of the file using the provided backup stream.
+func (w *BackupFileWriter) Write(b []byte) (int, error) {
+ var bytesWritten uint32
+ err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
+ if err != nil {
+ return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err}
+ }
+ runtime.KeepAlive(w.f)
+ if int(bytesWritten) != len(b) {
+ return int(bytesWritten), errors.New("not all bytes could be written")
+ }
+ return len(b), nil
+}
+
+// Close frees Win32 resources associated with the BackupFileWriter. It does not
+// close the underlying file.
+func (w *BackupFileWriter) Close() error {
+ if w.ctx != 0 {
+ _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
+ runtime.KeepAlive(w.f)
+ w.ctx = 0
+ }
+ return nil
+}
+
+// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
+// or restore privileges have been acquired.
+//
+// If the file opened was a directory, it cannot be used with Readdir().
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
+ h, err := fs.CreateFile(path,
+ fs.AccessMask(access),
+ fs.FileShareMode(share),
+ nil,
+ fs.FileCreationDisposition(createmode),
+ fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT,
+ 0,
+ )
+ if err != nil {
+ err = &os.PathError{Op: "open", Path: path, Err: err}
+ return nil, err
+ }
+ return os.NewFile(uintptr(h), path), nil
+}
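
The `backup.go` file above exposes the BackupRead/BackupWrite Win32 APIs through `OpenForBackup`, `BackupFileReader`, and `BackupStreamReader`. The following is only an illustrative sketch of how the exported API fits together; it is not part of the vendored file or this diff, and the path plus the access/share flags are placeholder assumptions.

```go
//go:build windows

// Sketch: walk the backup streams of a file using the vendored go-winio API.
package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	// Placeholder path; GENERIC_READ with read sharing is enough for a read-only stream walk.
	f, err := winio.OpenForBackup(`C:\temp\example.txt`, windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	br := winio.NewBackupFileReader(f, false) // false: skip the security descriptor stream
	defer br.Close()

	sr := winio.NewBackupStreamReader(br)
	for {
		hdr, err := sr.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}
```

Each `BackupHeader` returned by `Next()` describes one stream (data, alternate data, security, and so on); `Read()` then yields that stream's bytes until `io.EOF`.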
diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go
new file mode 100644
index 00000000..1f5bfe2d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/doc.go
@@ -0,0 +1,22 @@
+// This package provides utilities for efficiently performing Win32 IO operations in Go.
+// Currently, this package provides support for general IO and management of
+// - named pipes
+// - files
+// - [Hyper-V sockets]
+//
+// This code is similar to Go's [net] package, and uses IO completion ports to avoid
+// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines.
+//
+// This limits support to Windows Vista and newer operating systems.
+//
+// Additionally, this package provides support for:
+// - creating and managing GUIDs
+// - writing to [ETW]
+// - opening and managing VHDs
+// - parsing [Windows Image files]
+// - auto-generating Win32 API code
+//
+// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
+// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw-
+// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images
+package winio
diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go
new file mode 100644
index 00000000..e104dbdf
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/ea.go
@@ -0,0 +1,137 @@
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+)
+
+type fileFullEaInformation struct {
+ NextEntryOffset uint32
+ Flags uint8
+ NameLength uint8
+ ValueLength uint16
+}
+
+var (
+ fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
+
+ errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
+ errEaNameTooLarge = errors.New("extended attribute name too large")
+ errEaValueTooLarge = errors.New("extended attribute value too large")
+)
+
+// ExtendedAttribute represents a single Windows EA.
+type ExtendedAttribute struct {
+ Name string
+ Value []byte
+ Flags uint8
+}
+
+func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
+ var info fileFullEaInformation
+ err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
+ if err != nil {
+ err = errInvalidEaBuffer
+ return ea, nb, err
+ }
+
+ nameOffset := fileFullEaInformationSize
+ nameLen := int(info.NameLength)
+ valueOffset := nameOffset + int(info.NameLength) + 1
+ valueLen := int(info.ValueLength)
+ nextOffset := int(info.NextEntryOffset)
+ if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
+ err = errInvalidEaBuffer
+ return ea, nb, err
+ }
+
+ ea.Name = string(b[nameOffset : nameOffset+nameLen])
+ ea.Value = b[valueOffset : valueOffset+valueLen]
+ ea.Flags = info.Flags
+ if info.NextEntryOffset != 0 {
+ nb = b[info.NextEntryOffset:]
+ }
+ return ea, nb, err
+}
+
+// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
+// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
+ for len(b) != 0 {
+ ea, nb, err := parseEa(b)
+ if err != nil {
+ return nil, err
+ }
+
+ eas = append(eas, ea)
+ b = nb
+ }
+ return eas, err
+}
+
+func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
+ if int(uint8(len(ea.Name))) != len(ea.Name) {
+ return errEaNameTooLarge
+ }
+ if int(uint16(len(ea.Value))) != len(ea.Value) {
+ return errEaValueTooLarge
+ }
+ entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
+ withPadding := (entrySize + 3) &^ 3
+ nextOffset := uint32(0)
+ if !last {
+ nextOffset = withPadding
+ }
+ info := fileFullEaInformation{
+ NextEntryOffset: nextOffset,
+ Flags: ea.Flags,
+ NameLength: uint8(len(ea.Name)),
+ ValueLength: uint16(len(ea.Value)),
+ }
+
+ err := binary.Write(buf, binary.LittleEndian, &info)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte(ea.Name))
+ if err != nil {
+ return err
+ }
+
+ err = buf.WriteByte(0)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write(ea.Value)
+ if err != nil {
+ return err
+ }
+
+ _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
+// buffer for use with BackupWrite, ZwSetEaFile, etc.
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
+ var buf bytes.Buffer
+ for i := range eas {
+ last := false
+ if i == len(eas)-1 {
+ last = true
+ }
+
+ err := writeEa(&buf, &eas[i], last)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return buf.Bytes(), nil
+}
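
`ea.go` serializes extended attributes to and from the FILE_FULL_EA_INFORMATION wire format via `EncodeExtendedAttributes` and `DecodeExtendedAttributes`. A round-trip sketch follows; it is not part of the vendored file, and the attribute names and values are made up for illustration.

```go
//go:build windows

// Sketch: encode a list of EAs and decode them back.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	eas := []winio.ExtendedAttribute{
		{Name: "COMMENT", Value: []byte("hello")},
		{Name: "ORIGIN", Value: []byte{0x01, 0x02}},
	}

	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		panic(err)
	}

	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		panic(err)
	}
	for _, ea := range decoded {
		fmt.Printf("%s = %v\n", ea.Name, ea.Value)
	}
}
```

The resulting buffer is in the format expected by `BackupWrite` or `ZwSetEaFile`, per the doc comments above.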
diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go
new file mode 100644
index 00000000..fe82a180
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/file.go
@@ -0,0 +1,320 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "io"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx
+//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort
+//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
+//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
+//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
+
+var (
+ ErrFileClosed = errors.New("file has already been closed")
+ ErrTimeout = &timeoutError{}
+)
+
+type timeoutError struct{}
+
+func (*timeoutError) Error() string { return "i/o timeout" }
+func (*timeoutError) Timeout() bool { return true }
+func (*timeoutError) Temporary() bool { return true }
+
+type timeoutChan chan struct{}
+
+var ioInitOnce sync.Once
+var ioCompletionPort windows.Handle
+
+// ioResult contains the result of an asynchronous IO operation.
+type ioResult struct {
+ bytes uint32
+ err error
+}
+
+// ioOperation represents an outstanding asynchronous Win32 IO.
+type ioOperation struct {
+ o windows.Overlapped
+ ch chan ioResult
+}
+
+func initIO() {
+ h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
+ if err != nil {
+ panic(err)
+ }
+ ioCompletionPort = h
+ go ioCompletionProcessor(h)
+}
+
+// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
+// It takes ownership of this handle and will close it if it is garbage collected.
+type win32File struct {
+ handle windows.Handle
+ wg sync.WaitGroup
+ wgLock sync.RWMutex
+ closing atomic.Bool
+ socket bool
+ readDeadline deadlineHandler
+ writeDeadline deadlineHandler
+}
+
+type deadlineHandler struct {
+ setLock sync.Mutex
+ channel timeoutChan
+ channelLock sync.RWMutex
+ timer *time.Timer
+ timedout atomic.Bool
+}
+
+// makeWin32File makes a new win32File from an existing file handle.
+func makeWin32File(h windows.Handle) (*win32File, error) {
+ f := &win32File{handle: h}
+ ioInitOnce.Do(initIO)
+ _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
+ if err != nil {
+ return nil, err
+ }
+ err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE)
+ if err != nil {
+ return nil, err
+ }
+ f.readDeadline.channel = make(timeoutChan)
+ f.writeDeadline.channel = make(timeoutChan)
+ return f, nil
+}
+
+// Deprecated: use NewOpenFile instead.
+func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
+ return NewOpenFile(windows.Handle(h))
+}
+
+func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) {
+ // If we return the result of makeWin32File directly, it can result in an
+ // interface-wrapped nil, rather than a nil interface value.
+ f, err := makeWin32File(h)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// closeHandle closes the resources associated with a Win32 handle.
+func (f *win32File) closeHandle() {
+ f.wgLock.Lock()
+ // Atomically set that we are closing, releasing the resources only once.
+ if !f.closing.Swap(true) {
+ f.wgLock.Unlock()
+ // cancel all IO and wait for it to complete
+ _ = cancelIoEx(f.handle, nil)
+ f.wg.Wait()
+ // at this point, no new IO can start
+ windows.Close(f.handle)
+ f.handle = 0
+ } else {
+ f.wgLock.Unlock()
+ }
+}
+
+// Close closes a win32File.
+func (f *win32File) Close() error {
+ f.closeHandle()
+ return nil
+}
+
+// IsClosed checks if the file has been closed.
+func (f *win32File) IsClosed() bool {
+ return f.closing.Load()
+}
+
+// prepareIO prepares for a new IO operation.
+// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
+func (f *win32File) prepareIO() (*ioOperation, error) {
+ f.wgLock.RLock()
+ if f.closing.Load() {
+ f.wgLock.RUnlock()
+ return nil, ErrFileClosed
+ }
+ f.wg.Add(1)
+ f.wgLock.RUnlock()
+ c := &ioOperation{}
+ c.ch = make(chan ioResult)
+ return c, nil
+}
+
+// ioCompletionProcessor processes completed async IOs forever.
+func ioCompletionProcessor(h windows.Handle) {
+ for {
+ var bytes uint32
+ var key uintptr
+ var op *ioOperation
+ err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE)
+ if op == nil {
+ panic(err)
+ }
+ op.ch <- ioResult{bytes, err}
+ }
+}
+
+// todo: helsaawy - create an asyncIO version that takes a context
+
+// asyncIO processes the return value from ReadFile or WriteFile, blocking until
+// the operation has actually completed.
+func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
+ if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno
+ return int(bytes), err
+ }
+
+ if f.closing.Load() {
+ _ = cancelIoEx(f.handle, &c.o)
+ }
+
+ var timeout timeoutChan
+ if d != nil {
+ d.channelLock.Lock()
+ timeout = d.channel
+ d.channelLock.Unlock()
+ }
+
+ var r ioResult
+ select {
+ case r = <-c.ch:
+ err = r.err
+ if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+ if f.closing.Load() {
+ err = ErrFileClosed
+ }
+ } else if err != nil && f.socket {
+ // err is from Win32. Query the overlapped structure to get the winsock error.
+ var bytes, flags uint32
+ err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
+ }
+ case <-timeout:
+ _ = cancelIoEx(f.handle, &c.o)
+ r = <-c.ch
+ err = r.err
+ if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno
+ err = ErrTimeout
+ }
+ }
+
+ // runtime.KeepAlive is needed, as c is passed via native
+ // code to ioCompletionProcessor, c must remain alive
+ // until the channel read is complete.
+ // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive?
+ runtime.KeepAlive(c)
+ return int(r.bytes), err
+}
+
+// Read reads from a file handle.
+func (f *win32File) Read(b []byte) (int, error) {
+ c, err := f.prepareIO()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.readDeadline.timedout.Load() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = windows.ReadFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIO(c, &f.readDeadline, bytes, err)
+ runtime.KeepAlive(b)
+
+ // Handle EOF conditions.
+ if err == nil && n == 0 && len(b) != 0 {
+ return 0, io.EOF
+ } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno
+ return 0, io.EOF
+ }
+ return n, err
+}
+
+// Write writes to a file handle.
+func (f *win32File) Write(b []byte) (int, error) {
+ c, err := f.prepareIO()
+ if err != nil {
+ return 0, err
+ }
+ defer f.wg.Done()
+
+ if f.writeDeadline.timedout.Load() {
+ return 0, ErrTimeout
+ }
+
+ var bytes uint32
+ err = windows.WriteFile(f.handle, b, &bytes, &c.o)
+ n, err := f.asyncIO(c, &f.writeDeadline, bytes, err)
+ runtime.KeepAlive(b)
+ return n, err
+}
+
+func (f *win32File) SetReadDeadline(deadline time.Time) error {
+ return f.readDeadline.set(deadline)
+}
+
+func (f *win32File) SetWriteDeadline(deadline time.Time) error {
+ return f.writeDeadline.set(deadline)
+}
+
+func (f *win32File) Flush() error {
+ return windows.FlushFileBuffers(f.handle)
+}
+
+func (f *win32File) Fd() uintptr {
+ return uintptr(f.handle)
+}
+
+func (d *deadlineHandler) set(deadline time.Time) error {
+ d.setLock.Lock()
+ defer d.setLock.Unlock()
+
+ if d.timer != nil {
+ if !d.timer.Stop() {
+ <-d.channel
+ }
+ d.timer = nil
+ }
+ d.timedout.Store(false)
+
+ select {
+ case <-d.channel:
+ d.channelLock.Lock()
+ d.channel = make(chan struct{})
+ d.channelLock.Unlock()
+ default:
+ }
+
+ if deadline.IsZero() {
+ return nil
+ }
+
+ timeoutIO := func() {
+ d.timedout.Store(true)
+ close(d.channel)
+ }
+
+ now := time.Now()
+ duration := deadline.Sub(now)
+ if deadline.After(now) {
+ // Deadline is in the future, set a timer to wait
+ d.timer = time.AfterFunc(duration, timeoutIO)
+ } else {
+ // Deadline is in the past. Cancel all pending IO now.
+ timeoutIO()
+ }
+ return nil
+}
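
`file.go` is the package's core non-blocking IO layer: handles are associated with a shared IO completion port, and `asyncIO` parks each operation on a per-operation channel until the completion processor posts its result. The exported entry point is `NewOpenFile`, which takes ownership of a handle opened with `FILE_FLAG_OVERLAPPED`. A hedged sketch follows; it is not part of the vendored file, the path is a placeholder, and a pipe or similar overlapped handle is the more typical use than a plain disk file.

```go
//go:build windows

// Sketch: wrap an overlapped Win32 handle with winio's non-blocking file implementation.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	name, err := windows.UTF16PtrFromString(`C:\temp\example.txt`)
	if err != nil {
		panic(err)
	}
	h, err := windows.CreateFile(
		name,
		windows.GENERIC_READ,
		windows.FILE_SHARE_READ,
		nil,
		windows.OPEN_EXISTING,
		windows.FILE_FLAG_OVERLAPPED, // required for the completion-port model
		0,
	)
	if err != nil {
		panic(err)
	}

	f, err := winio.NewOpenFile(h) // takes ownership of h
	if err != nil {
		_ = windows.CloseHandle(h)
		panic(err)
	}
	defer f.Close()

	buf := make([]byte, 512)
	n, err := f.Read(buf)
	fmt.Println(n, err)
}
```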
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 00000000..c860eb99
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,106 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "os"
+ "runtime"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
+ FileAttributes uint32
+ _ uint32 // padding
+}
+
+// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing
+// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64
+// alignment is necessary to pass this as FILE_BASIC_INFO.
+type alignedFileBasicInfo struct {
+ CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64
+ FileAttributes uint32
+ _ uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+ bi := &alignedFileBasicInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(bi)),
+ uint32(unsafe.Sizeof(*bi)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the
+ // public API of this module. The data may be unnecessarily aligned.
+ return (*FileBasicInfo)(unsafe.Pointer(bi)), nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+ // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is
+ // suitable to pass to SetFileInformationByHandle.
+ biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi))
+ if err := windows.SetFileInformationByHandle(
+ windows.Handle(f.Fd()),
+ windows.FileBasicInfo,
+ (*byte)(unsafe.Pointer(&biAligned)),
+ uint32(unsafe.Sizeof(biAligned)),
+ ); err != nil {
+ return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+ AllocationSize, EndOfFile int64
+ NumberOfLinks uint32
+ DeletePending, Directory bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+ si := &FileStandardInfo{}
+ if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()),
+ windows.FileStandardInfo,
+ (*byte)(unsafe.Pointer(si)),
+ uint32(unsafe.Sizeof(*si))); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return si, nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+ VolumeSerialNumber uint64
+ FileID [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
+func GetFileID(f *os.File) (*FileIDInfo, error) {
+ fileID := &FileIDInfo{}
+ if err := windows.GetFileInformationByHandleEx(
+ windows.Handle(f.Fd()),
+ windows.FileIdInfo,
+ (*byte)(unsafe.Pointer(fileID)),
+ uint32(unsafe.Sizeof(*fileID)),
+ ); err != nil {
+ return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+ }
+ runtime.KeepAlive(f)
+ return fileID, nil
+}
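
`fileinfo.go` wraps `GetFileInformationByHandleEx`/`SetFileInformationByHandle` for an already-open `*os.File`. A small sketch follows; it is not part of the vendored file, and the path is a placeholder.

```go
//go:build windows

// Sketch: read the basic attributes and the (volume, file ID) pair of a file.
package main

import (
	"fmt"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\temp\example.txt`)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("attributes: 0x%x\n", bi.FileAttributes)

	id, err := winio.GetFileID(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("volume serial: %d, file id: %x\n", id.VolumeSerialNumber, id.FileID)
}
```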
diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go
new file mode 100644
index 00000000..c4fdd9d4
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/hvsock.go
@@ -0,0 +1,582 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/socket"
+ "github.com/Microsoft/go-winio/pkg/guid"
+)
+
+const afHVSock = 34 // AF_HYPERV
+
+// Well known Service and VM IDs
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
+
+// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
+func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
+ return guid.GUID{}
+}
+
+// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
+func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
+ return guid.GUID{
+ Data1: 0xffffffff,
+ Data2: 0xffff,
+ Data3: 0xffff,
+ Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ }
+}
+
+// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector.
+func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838
+ return guid.GUID{
+ Data1: 0xe0e16197,
+ Data2: 0xdd56,
+ Data3: 0x4a10,
+ Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38},
+ }
+}
+
+// HvsockGUIDSiloHost is the address of a silo's host partition:
+// - The silo host of a hosted silo is the utility VM.
+// - The silo host of a silo on a physical host is the physical host.
+func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568
+ return guid.GUID{
+ Data1: 0x36bd0c5c,
+ Data2: 0x7276,
+ Data3: 0x4223,
+ Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68},
+ }
+}
+
+// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions.
+func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd
+ return guid.GUID{
+ Data1: 0x90db8b89,
+ Data2: 0xd35,
+ Data3: 0x4f79,
+ Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd},
+ }
+}
+
+// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition.
+// Listening on this VmId accepts connection from:
+// - Inside silos: silo host partition.
+// - Inside hosted silo: host of the VM.
+// - Inside VM: VM host.
+// - Physical host: Not supported.
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878
+ return guid.GUID{
+ Data1: 0xa42e7cda,
+ Data2: 0xd03f,
+ Data3: 0x480c,
+ Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78},
+ }
+}
+
+// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol.
+func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3
+ return guid.GUID{
+ Data2: 0xfacb,
+ Data3: 0x11e6,
+ Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3},
+ }
+}
+
+// An HvsockAddr is an address for a AF_HYPERV socket.
+type HvsockAddr struct {
+ VMID guid.GUID
+ ServiceID guid.GUID
+}
+
+type rawHvsockAddr struct {
+ Family uint16
+ _ uint16
+ VMID guid.GUID
+ ServiceID guid.GUID
+}
+
+var _ socket.RawSockaddr = &rawHvsockAddr{}
+
+// Network returns the address's network name, "hvsock".
+func (*HvsockAddr) Network() string {
+ return "hvsock"
+}
+
+func (addr *HvsockAddr) String() string {
+ return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
+}
+
+// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
+func VsockServiceID(port uint32) guid.GUID {
+ g := hvsockVsockServiceTemplate() // make a copy
+ g.Data1 = port
+ return g
+}
+
+func (addr *HvsockAddr) raw() rawHvsockAddr {
+ return rawHvsockAddr{
+ Family: afHVSock,
+ VMID: addr.VMID,
+ ServiceID: addr.ServiceID,
+ }
+}
+
+func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
+ addr.VMID = raw.VMID
+ addr.ServiceID = raw.ServiceID
+}
+
+// Sockaddr returns a pointer to and the size of this struct.
+//
+// Implements the [socket.RawSockaddr] interface, and allows use in
+// [socket.Bind] and [socket.ConnectEx].
+func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) {
+ return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil
+}
+
+// FromBytes populates the address from a raw sockaddr buffer.
+func (r *rawHvsockAddr) FromBytes(b []byte) error {
+ n := int(unsafe.Sizeof(rawHvsockAddr{}))
+
+ if len(b) < n {
+ return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize)
+ }
+
+ copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n])
+ if r.Family != afHVSock {
+ return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily)
+ }
+
+ return nil
+}
+
+// HvsockListener is a socket listener for the AF_HYPERV address family.
+type HvsockListener struct {
+ sock *win32File
+ addr HvsockAddr
+}
+
+var _ net.Listener = &HvsockListener{}
+
+// HvsockConn is a connected socket of the AF_HYPERV address family.
+type HvsockConn struct {
+ sock *win32File
+ local, remote HvsockAddr
+}
+
+var _ net.Conn = &HvsockConn{}
+
+func newHVSocket() (*win32File, error) {
+ fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1)
+ if err != nil {
+ return nil, os.NewSyscallError("socket", err)
+ }
+ f, err := makeWin32File(fd)
+ if err != nil {
+ windows.Close(fd)
+ return nil, err
+ }
+ f.socket = true
+ return f, nil
+}
+
+// ListenHvsock listens for connections on the specified hvsock address.
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
+ l := &HvsockListener{addr: *addr}
+
+ var sock *win32File
+ sock, err = newHVSocket()
+ if err != nil {
+ return nil, l.opErr("listen", err)
+ }
+ defer func() {
+ if err != nil {
+ _ = sock.Close()
+ }
+ }()
+
+ sa := addr.raw()
+ err = socket.Bind(sock.handle, &sa)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("socket", err))
+ }
+ err = windows.Listen(sock.handle, 16)
+ if err != nil {
+ return nil, l.opErr("listen", os.NewSyscallError("listen", err))
+ }
+ return &HvsockListener{sock: sock, addr: *addr}, nil
+}
+
+func (l *HvsockListener) opErr(op string, err error) error {
+ return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
+}
+
+// Addr returns the listener's network address.
+func (l *HvsockListener) Addr() net.Addr {
+ return &l.addr
+}
+
+// Accept waits for the next connection and returns it.
+func (l *HvsockListener) Accept() (_ net.Conn, err error) {
+ sock, err := newHVSocket()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer func() {
+ if sock != nil {
+ sock.Close()
+ }
+ }()
+ c, err := l.sock.prepareIO()
+ if err != nil {
+ return nil, l.opErr("accept", err)
+ }
+ defer l.sock.wg.Done()
+
+ // AcceptEx, per documentation, requires an extra 16 bytes per address.
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex
+ const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
+ var addrbuf [addrlen * 2]byte
+
+ var bytes uint32
+ err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
+ if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
+ return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
+ }
+
+ conn := &HvsockConn{
+ sock: sock,
+ }
+ // The local address returned in the AcceptEx buffer is the same as the Listener socket's
+ // address. However, the service GUID reported by GetSockName is different from the Listener's
+ // socket, and is sometimes the same as the local address of the socket that dialed the
+ // address, with the service GUID.Data1 incremented, but other times it is different.
+ // todo: does the local address matter? is the listener's address or the actual address appropriate?
+ conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
+ conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
+
+ // initialize the accepted socket and update its properties with those of the listening socket
+ if err = windows.Setsockopt(sock.handle,
+ windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT,
+ (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil {
+ return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err))
+ }
+
+ sock = nil
+ return conn, nil
+}
+
+// Close closes the listener, causing any pending Accept calls to fail.
+func (l *HvsockListener) Close() error {
+ return l.sock.Close()
+}
+
+// HvsockDialer configures and dials a Hyper-V Socket (i.e., [HvsockConn]).
+type HvsockDialer struct {
+ // Deadline is the time the Dial operation must connect before erroring.
+ Deadline time.Time
+
+ // Retries is the number of additional connects to try if the connection times out, is refused,
+ // or the host is unreachable
+ Retries uint
+
+ // RetryWait is the time to wait after a connection error to retry
+ RetryWait time.Duration
+
+ rt *time.Timer // redial wait timer
+}
+
+// Dial the Hyper-V socket at addr.
+//
+// See [HvsockDialer.Dial] for more information.
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ return (&HvsockDialer{}).Dial(ctx, addr)
+}
+
+// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful.
+// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between
+// retries.
+//
+// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx.
+func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) {
+ op := "dial"
+ // create the conn early to use opErr()
+ conn = &HvsockConn{
+ remote: *addr,
+ }
+
+ if !d.Deadline.IsZero() {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithDeadline(ctx, d.Deadline)
+ defer cancel()
+ }
+
+ // preemptive timeout/cancellation check
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
+ }
+
+ sock, err := newHVSocket()
+ if err != nil {
+ return nil, conn.opErr(op, err)
+ }
+ defer func() {
+ if sock != nil {
+ sock.Close()
+ }
+ }()
+
+ sa := addr.raw()
+ err = socket.Bind(sock.handle, &sa)
+ if err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("bind", err))
+ }
+
+ c, err := sock.prepareIO()
+ if err != nil {
+ return nil, conn.opErr(op, err)
+ }
+ defer sock.wg.Done()
+ var bytes uint32
+ for i := uint(0); i <= d.Retries; i++ {
+ err = socket.ConnectEx(
+ sock.handle,
+ &sa,
+ nil, // sendBuf
+ 0, // sendDataLen
+ &bytes,
+ (*windows.Overlapped)(unsafe.Pointer(&c.o)))
+ _, err = sock.asyncIO(c, nil, bytes, err)
+ if i < d.Retries && canRedial(err) {
+ if err = d.redialWait(ctx); err == nil {
+ continue
+ }
+ }
+ break
+ }
+ if err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("connectex", err))
+ }
+
+ // update the connection properties, so shutdown can be used
+ if err = windows.Setsockopt(
+ sock.handle,
+ windows.SOL_SOCKET,
+ windows.SO_UPDATE_CONNECT_CONTEXT,
+ nil, // optvalue
+ 0, // optlen
+ ); err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err))
+ }
+
+ // get the local name
+ var sal rawHvsockAddr
+ err = socket.GetSockName(sock.handle, &sal)
+ if err != nil {
+ return nil, conn.opErr(op, os.NewSyscallError("getsockname", err))
+ }
+ conn.local.fromRaw(&sal)
+
+ // one last check for timeout, since asyncIO doesn't check the context
+ if err = ctx.Err(); err != nil {
+ return nil, conn.opErr(op, err)
+ }
+
+ conn.sock = sock
+ sock = nil
+
+ return conn, nil
+}
+
+// redialWait waits before attempting to redial, resetting the timer as appropriate.
+func (d *HvsockDialer) redialWait(ctx context.Context) (err error) {
+ if d.RetryWait == 0 {
+ return nil
+ }
+
+ if d.rt == nil {
+ d.rt = time.NewTimer(d.RetryWait)
+ } else {
+ // should already be stopped and drained
+ d.rt.Reset(d.RetryWait)
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-d.rt.C:
+ return nil
+ }
+
+ // stop and drain the timer
+ if !d.rt.Stop() {
+ <-d.rt.C
+ }
+ return ctx.Err()
+}
+
+// assumes error is a plain, unwrapped windows.Errno provided by direct syscall.
+func canRedial(err error) bool {
+ //nolint:errorlint // guaranteed to be an Errno
+ switch err {
+ case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT,
+ windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL:
+ return true
+ default:
+ return false
+ }
+}
+
+func (conn *HvsockConn) opErr(op string, err error) error {
+ // translate from "file closed" to "socket closed"
+ if errors.Is(err, ErrFileClosed) {
+ err = socket.ErrSocketClosed
+ }
+ return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
+}
+
+func (conn *HvsockConn) Read(b []byte) (int, error) {
+ c, err := conn.sock.prepareIO()
+ if err != nil {
+ return 0, conn.opErr("read", err)
+ }
+ defer conn.sock.wg.Done()
+ buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+ var flags, bytes uint32
+ err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+ n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err)
+ if err != nil {
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsarecv", eno)
+ }
+ return 0, conn.opErr("read", err)
+ } else if n == 0 {
+ err = io.EOF
+ }
+ return n, err
+}
+
+func (conn *HvsockConn) Write(b []byte) (int, error) {
+ t := 0
+ for len(b) != 0 {
+ n, err := conn.write(b)
+ if err != nil {
+ return t + n, err
+ }
+ t += n
+ b = b[n:]
+ }
+ return t, nil
+}
+
+func (conn *HvsockConn) write(b []byte) (int, error) {
+ c, err := conn.sock.prepareIO()
+ if err != nil {
+ return 0, conn.opErr("write", err)
+ }
+ defer conn.sock.wg.Done()
+ buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+ var bytes uint32
+ err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+ n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err)
+ if err != nil {
+ var eno windows.Errno
+ if errors.As(err, &eno) {
+ err = os.NewSyscallError("wsasend", eno)
+ }
+ return 0, conn.opErr("write", err)
+ }
+ return n, err
+}
+
+// Close closes the socket connection, failing any pending read or write calls.
+func (conn *HvsockConn) Close() error {
+ return conn.sock.Close()
+}
+
+func (conn *HvsockConn) IsClosed() bool {
+ return conn.sock.IsClosed()
+}
+
+// shutdown disables sending or receiving on a socket.
+func (conn *HvsockConn) shutdown(how int) error {
+ if conn.IsClosed() {
+ return socket.ErrSocketClosed
+ }
+
+ err := windows.Shutdown(conn.sock.handle, how)
+ if err != nil {
+ // If the connection was closed, shutdowns fail with "not connected"
+ if errors.Is(err, windows.WSAENOTCONN) ||
+ errors.Is(err, windows.WSAESHUTDOWN) {
+ err = socket.ErrSocketClosed
+ }
+ return os.NewSyscallError("shutdown", err)
+ }
+ return nil
+}
+
+// CloseRead shuts down the read end of the socket, preventing future read operations.
+func (conn *HvsockConn) CloseRead() error {
+ err := conn.shutdown(windows.SHUT_RD)
+ if err != nil {
+ return conn.opErr("closeread", err)
+ }
+ return nil
+}
+
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
+func (conn *HvsockConn) CloseWrite() error {
+ err := conn.shutdown(windows.SHUT_WR)
+ if err != nil {
+ return conn.opErr("closewrite", err)
+ }
+ return nil
+}
+
+// LocalAddr returns the local address of the connection.
+func (conn *HvsockConn) LocalAddr() net.Addr {
+ return &conn.local
+}
+
+// RemoteAddr returns the remote address of the connection.
+func (conn *HvsockConn) RemoteAddr() net.Addr {
+ return &conn.remote
+}
+
+// SetDeadline implements the net.Conn SetDeadline method.
+func (conn *HvsockConn) SetDeadline(t time.Time) error {
+ // todo: implement `SetDeadline` for `win32File`
+ if err := conn.SetReadDeadline(t); err != nil {
+ return fmt.Errorf("set read deadline: %w", err)
+ }
+ if err := conn.SetWriteDeadline(t); err != nil {
+ return fmt.Errorf("set write deadline: %w", err)
+ }
+ return nil
+}
+
+// SetReadDeadline implements the net.Conn SetReadDeadline method.
+func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
+ return conn.sock.SetReadDeadline(t)
+}
+
+// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
+func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
+ return conn.sock.SetWriteDeadline(t)
+}
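
`hvsock.go` implements AF_HYPERV listeners and connections, with `VsockServiceID` mapping an AF_VSOCK-style port onto the well-known service GUID template. A dialing sketch follows; it is not part of the vendored file, and the VM ID and port are placeholders that would normally come from the hypervisor configuration.

```go
//go:build windows

// Sketch: dial a Hyper-V socket from the host to a guest partition.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Placeholder VM ID; in practice this comes from the hypervisor (e.g. Get-VM).
	vmID, err := guid.FromString("01234567-89ab-cdef-0123-456789abcdef")
	if err != nil {
		panic(err)
	}

	addr := &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(5000), // AF_VSOCK-style port 5000
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := winio.Dial(ctx, addr)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	fmt.Println("connected:", conn.LocalAddr(), "->", conn.RemoteAddr())
}
```

For retries and per-attempt waits, `HvsockDialer` exposes the same `Dial` with `Retries` and `RetryWait` fields, as documented above.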
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
new file mode 100644
index 00000000..1f653881
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
@@ -0,0 +1,2 @@
+// This package contains Win32 filesystem functionality.
+package fs
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
new file mode 100644
index 00000000..0cd9621d
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
@@ -0,0 +1,262 @@
+//go:build windows
+
+package fs
+
+import (
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/stringbuffer"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
+
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+
+const NullHandle windows.Handle = 0
+
+// AccessMask defines standard, specific, and generic rights.
+//
+// Used with CreateFile and NtCreateFile (and co.).
+//
+// Bitmask:
+// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+// +---------------+---------------+-------------------------------+
+// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
+// |R|W|E|A| |S| | |
+// +-+-------------+---------------+-------------------------------+
+//
+// GR Generic Read
+// GW Generic Write
+// GE Generic Execute
+// GA Generic All
+// Resvd Reserved
+// AS Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // Not actually any.
+ //
+ // For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+ // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+ FILE_ANY_ACCESS AccessMask = 0
+
+ GENERIC_READ AccessMask = 0x8000_0000
+ GENERIC_WRITE AccessMask = 0x4000_0000
+ GENERIC_EXECUTE AccessMask = 0x2000_0000
+ GENERIC_ALL AccessMask = 0x1000_0000
+ ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000
+
+ // Specific Object Access
+ // from ntioapi.h
+
+ FILE_READ_DATA AccessMask = (0x0001) // file & pipe
+ FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+ FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+ FILE_ADD_FILE AccessMask = (0x0002) // directory
+
+ FILE_APPEND_DATA AccessMask = (0x0004) // file
+ FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
+ FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+ FILE_READ_EA AccessMask = (0x0008) // file & directory
+ FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+ FILE_WRITE_EA AccessMask = (0x0010) // file & directory
+ FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+ FILE_EXECUTE AccessMask = (0x0020) // file
+ FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+ FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+ FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+ FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+ FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+ FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+ FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+ FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+ SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+ // Standard Access
+ // from ntseapi.h
+
+ DELETE AccessMask = 0x0001_0000
+ READ_CONTROL AccessMask = 0x0002_0000
+ WRITE_DAC AccessMask = 0x0004_0000
+ WRITE_OWNER AccessMask = 0x0008_0000
+ SYNCHRONIZE AccessMask = 0x0010_0000
+
+ STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+ STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
+ STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+ STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ FILE_SHARE_NONE FileShareMode = 0x00
+ FILE_SHARE_READ FileShareMode = 0x01
+ FILE_SHARE_WRITE FileShareMode = 0x02
+ FILE_SHARE_DELETE FileShareMode = 0x04
+ FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
+)
+
+type FileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winbase.h
+
+ CREATE_NEW FileCreationDisposition = 0x01
+ CREATE_ALWAYS FileCreationDisposition = 0x02
+ OPEN_EXISTING FileCreationDisposition = 0x03
+ OPEN_ALWAYS FileCreationDisposition = 0x04
+ TRUNCATE_EXISTING FileCreationDisposition = 0x05
+)
+
+// Create disposition values for NtCreate*
+type NTFileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // From ntioapi.h
+
+ FILE_SUPERSEDE NTFileCreationDisposition = 0x00
+ FILE_OPEN NTFileCreationDisposition = 0x01
+ FILE_CREATE NTFileCreationDisposition = 0x02
+ FILE_OPEN_IF NTFileCreationDisposition = 0x03
+ FILE_OVERWRITE NTFileCreationDisposition = 0x04
+ FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05
+ FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05
+)
+
+// CreateFile and co. take flags or attributes together as one parameter.
+// Define alias until we can use generics to allow both
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
+type FileFlagOrAttribute uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winnt.h
+
+ FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000
+ FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000
+ FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000
+ FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000
+ FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000
+ FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000
+ FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000
+ FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000
+ FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000
+ FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000
+ FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
+)
+
+// NtCreate* functions take a dedicated CreateOptions parameter.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile
+//
+// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file
+type NTCreateOptions uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // From ntioapi.h
+
+ FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001
+ FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002
+ FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004
+ FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008
+
+ FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010
+ FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020
+ FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040
+ FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080
+
+ FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100
+ FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200
+ FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400
+ FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800
+
+ FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000
+ FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000
+ FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000
+ FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000
+)
+
+type FileSQSFlag = FileFlagOrAttribute
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ // from winbase.h
+
+ SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
+ SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
+ SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
+ SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
+
+ SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000
+ SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000
+)
+
+// GetFinalPathNameByHandle flags
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
+type GetFinalPathFlag uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+ GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
+
+ FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
+ FILE_NAME_OPENED GetFinalPathFlag = 0x8
+
+ VOLUME_NAME_DOS GetFinalPathFlag = 0x0
+ VOLUME_NAME_GUID GetFinalPathFlag = 0x1
+ VOLUME_NAME_NT GetFinalPathFlag = 0x2
+ VOLUME_NAME_NONE GetFinalPathFlag = 0x4
+)
+
+// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
+// with the given handle and flags. It transparently takes care of creating a buffer of the
+// correct size for the call.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
+func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
+ b := stringbuffer.NewWString()
+ //TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
+ for {
+ n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
+ if err != nil {
+ return "", err
+ }
+ // If the buffer wasn't large enough, n will be the total size needed (including null terminator).
+ // Resize and try again.
+ if n > b.Cap() {
+ b.ResizeTo(n)
+ continue
+ }
+ // If the buffer is large enough, n will be the size not including the null terminator.
+ // Convert to a Go string and return.
+ return b.String(), nil
+ }
+}
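
The buffer-grow loop above is the standard pattern for Win32 APIs that report a required size. Because internal/fs cannot be imported from outside the go-winio module, the following is only a minimal sketch of the same pattern written directly against golang.org/x/sys/windows; the path in main is illustrative.

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

// finalPath resolves the final path of an open handle, growing the buffer until
// it is large enough, mirroring GetFinalPathNameByHandle in internal/fs above.
func finalPath(h windows.Handle) (string, error) {
	buf := make([]uint16, 310) // MAX_PATH + volume GUID prefix + NUL
	for {
		// flags 0 == FILE_NAME_NORMALIZED | VOLUME_NAME_DOS
		n, err := windows.GetFinalPathNameByHandle(h, &buf[0], uint32(len(buf)), 0)
		if err != nil {
			return "", err
		}
		if int(n) < len(buf) {
			return windows.UTF16ToString(buf[:n]), nil // on success n excludes the NUL terminator
		}
		buf = make([]uint16, n) // on failure n is the required size, including the NUL terminator
	}
}

func main() {
	f, err := os.Open(`C:\Windows\notepad.exe`) // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	p, err := finalPath(windows.Handle(f.Fd()))
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}
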
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/security.go b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
new file mode 100644
index 00000000..81760ac6
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/security.go
@@ -0,0 +1,12 @@
+package fs
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
+type SecurityImpersonationLevel int32 // C's default enum underlying type is `int`, which corresponds to Go's `int32`
+
+// Impersonation levels
+const (
+ SecurityAnonymous SecurityImpersonationLevel = 0
+ SecurityIdentification SecurityImpersonationLevel = 1
+ SecurityImpersonation SecurityImpersonationLevel = 2
+ SecurityDelegation SecurityImpersonationLevel = 3
+)
diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
new file mode 100644
index 00000000..a94e234c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
@@ -0,0 +1,61 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package fs
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procCreateFileW = modkernel32.NewProc("CreateFileW")
+)
+
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
new file mode 100644
index 00000000..7e82f9af
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go
@@ -0,0 +1,20 @@
+package socket
+
+import (
+ "unsafe"
+)
+
+// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The
+// struct must meet the Win32 sockaddr requirements specified here:
+// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2
+//
+// Specifically, the struct must be larger than an int16 (unsigned short), which holds
+// the address family.
+type RawSockaddr interface {
+ // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing
+ // for the RawSockaddr's data to be overwritten by syscalls (if necessary).
+ //
+ // It is the caller's responsibility to validate that the values are valid; invalid
+ // pointers or size can cause a panic.
+ Sockaddr() (unsafe.Pointer, int32, error)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
new file mode 100644
index 00000000..88580d97
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
@@ -0,0 +1,177 @@
+//go:build windows
+
+package socket
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/Microsoft/go-winio/pkg/guid"
+ "golang.org/x/sys/windows"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go
+
+//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname
+//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername
+//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
+
+const socketError = uintptr(^uint32(0))
+
+var (
+ // todo(helsaawy): create custom error types to store the desired vs actual size and addr family?
+
+ ErrBufferSize = errors.New("buffer size")
+ ErrAddrFamily = errors.New("address family")
+ ErrInvalidPointer = errors.New("invalid pointer")
+ ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed)
+)
+
+// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error)
+
+// GetSockName writes the local address of socket s to the [RawSockaddr] rsa.
+// If rsa is not large enough, [windows.WSAEFAULT] is returned.
+func GetSockName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ // although getsockname returns WSAEFAULT if the buffer is too small, it does not set
+ // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy
+ return getsockname(s, ptr, &l)
+}
+
+// GetPeerName returns the remote address the socket is connected to.
+//
+// See [GetSockName] for more information.
+func GetPeerName(s windows.Handle, rsa RawSockaddr) error {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return getpeername(s, ptr, &l)
+}
+
+func Bind(s windows.Handle, rsa RawSockaddr) (err error) {
+ ptr, l, err := rsa.Sockaddr()
+ if err != nil {
+ return fmt.Errorf("could not retrieve socket pointer and size: %w", err)
+ }
+
+ return bind(s, ptr, l)
+}
+
+// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of
+// their sockaddr interface, so they cannot be used with HvsockAddr
+// Replicate functionality here from
+// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go
+
+// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at
+// runtime via a WSAIoctl call:
+// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks
+
+type runtimeFunc struct {
+ id guid.GUID
+ once sync.Once
+ addr uintptr
+ err error
+}
+
+func (f *runtimeFunc) Load() error {
+ f.once.Do(func() {
+ var s windows.Handle
+ s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+ if f.err != nil {
+ return
+ }
+ defer windows.CloseHandle(s) //nolint:errcheck
+
+ var n uint32
+ f.err = windows.WSAIoctl(s,
+ windows.SIO_GET_EXTENSION_FUNCTION_POINTER,
+ (*byte)(unsafe.Pointer(&f.id)),
+ uint32(unsafe.Sizeof(f.id)),
+ (*byte)(unsafe.Pointer(&f.addr)),
+ uint32(unsafe.Sizeof(f.addr)),
+ &n,
+ nil, // overlapped
+ 0, // completionRoutine
+ )
+ })
+ return f.err
+}
+
+var (
+ // todo: add `AcceptEx` and `GetAcceptExSockaddrs`
+ WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS
+ Data1: 0x25a207b9,
+ Data2: 0xddf3,
+ Data3: 0x4660,
+ Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+ }
+
+ connectExFunc = runtimeFunc{id: WSAID_CONNECTEX}
+)
+
+func ConnectEx(
+ fd windows.Handle,
+ rsa RawSockaddr,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) error {
+ if err := connectExFunc.Load(); err != nil {
+ return fmt.Errorf("failed to load ConnectEx function pointer: %w", err)
+ }
+ ptr, n, err := rsa.Sockaddr()
+ if err != nil {
+ return err
+ }
+ return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
+
+// BOOL LpfnConnectex(
+// [in] SOCKET s,
+// [in] const sockaddr *name,
+// [in] int namelen,
+// [in, optional] PVOID lpSendBuffer,
+// [in] DWORD dwSendDataLength,
+// [out] LPDWORD lpdwBytesSent,
+// [in] LPOVERLAPPED lpOverlapped
+// )
+
+func connectEx(
+ s windows.Handle,
+ name unsafe.Pointer,
+ namelen int32,
+ sendBuf *byte,
+ sendDataLen uint32,
+ bytesSent *uint32,
+ overlapped *windows.Overlapped,
+) (err error) {
+ r1, _, e1 := syscall.SyscallN(connectExFunc.addr,
+ uintptr(s),
+ uintptr(name),
+ uintptr(namelen),
+ uintptr(unsafe.Pointer(sendBuf)),
+ uintptr(sendDataLen),
+ uintptr(unsafe.Pointer(bytesSent)),
+ uintptr(unsafe.Pointer(overlapped)),
+ )
+
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
new file mode 100644
index 00000000..e1504126
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go
@@ -0,0 +1,69 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package socket
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+
+ procbind = modws2_32.NewProc("bind")
+ procgetpeername = modws2_32.NewProc("getpeername")
+ procgetsockname = modws2_32.NewProc("getsockname")
+)
+
+func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen)))
+ if r1 == socketError {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
new file mode 100644
index 00000000..42ebc019
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
@@ -0,0 +1,132 @@
+package stringbuffer
+
+import (
+ "sync"
+ "unicode/utf16"
+)
+
+// TODO: worth exporting and using in mkwinsyscall?
+
+// MinWStringCap is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
+// large path strings:
+// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
+const MinWStringCap = 310
+
+// use *[]uint16 since []uint16 creates an extra allocation where the slice header
+// is copied to heap and then referenced via pointer in the interface header that sync.Pool
+// stores.
+var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
+ New: func() interface{} {
+ b := make([]uint16, MinWStringCap)
+ return &b
+ },
+}
+
+func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
+
+// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
+// This avoids taking a pointer to the slice header in WString, which can be set to nil.
+func freeBuffer(b []uint16) { pathPool.Put(&b) }
+
+// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
+// for interacting with Win32 APIs.
+// Sizes are specified as uint32 and not int.
+//
+// It is not thread safe.
+type WString struct {
+ // type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
+
+ // raw buffer
+ b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free]
+func NewWString() *WString {
+ return &WString{
+ b: newBuffer(),
+ }
+}
+
+func (b *WString) Free() {
+ if b.empty() {
+ return
+ }
+ freeBuffer(b.b)
+ b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into the pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+ // already sufficient (or c is 0)
+ if c <= b.Cap() {
+ return b.Cap()
+ }
+
+ if c <= MinWStringCap {
+ c = MinWStringCap
+ }
+ // allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
+ // allocate at least double the buffer size, as is done in [bytes.Buffer] and other places
+ c = 2 * b.Cap()
+ }
+
+ b2 := make([]uint16, c)
+ if !b.empty() {
+ copy(b2, b.b)
+ freeBuffer(b.b)
+ }
+ b.b = b2
+ return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+ if b.empty() {
+ return nil
+ }
+ return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+ if b.empty() {
+ return nil
+ }
+ return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+ // Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+ // and would make this code Windows-only, which makes no sense.
+ // So copy UTF16ToString code into here.
+ // If other windows-specific code is added, switch to [windows.UTF16ToString]
+
+ s := b.b
+ for i, v := range s {
+ if v == 0 {
+ s = s[:i]
+ break
+ }
+ }
+ return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 {
+ if b.empty() {
+ return 0
+ }
+ return b.cap()
+}
+
+func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
+func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go
new file mode 100644
index 00000000..a2da6639
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pipe.go
@@ -0,0 +1,586 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "runtime"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "github.com/Microsoft/go-winio/internal/fs"
+)
+
+//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe
+//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW
+//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe
+//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
+//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile
+//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U
+//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl
+
+type PipeConn interface {
+ net.Conn
+ Disconnect() error
+ Flush() error
+}
+
+// type aliases for mkwinsyscall code
+type (
+ ntAccessMask = fs.AccessMask
+ ntFileShareMode = fs.FileShareMode
+ ntFileCreationDisposition = fs.NTFileCreationDisposition
+ ntFileOptions = fs.NTCreateOptions
+)
+
+type ioStatusBlock struct {
+ Status, Information uintptr
+}
+
+// typedef struct _OBJECT_ATTRIBUTES {
+// ULONG Length;
+// HANDLE RootDirectory;
+// PUNICODE_STRING ObjectName;
+// ULONG Attributes;
+// PVOID SecurityDescriptor;
+// PVOID SecurityQualityOfService;
+// } OBJECT_ATTRIBUTES;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes
+type objectAttributes struct {
+ Length uintptr
+ RootDirectory uintptr
+ ObjectName *unicodeString
+ Attributes uintptr
+ SecurityDescriptor *securityDescriptor
+ SecurityQoS uintptr
+}
+
+type unicodeString struct {
+ Length uint16
+ MaximumLength uint16
+ Buffer uintptr
+}
+
+// typedef struct _SECURITY_DESCRIPTOR {
+// BYTE Revision;
+// BYTE Sbz1;
+// SECURITY_DESCRIPTOR_CONTROL Control;
+// PSID Owner;
+// PSID Group;
+// PACL Sacl;
+// PACL Dacl;
+// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR;
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor
+type securityDescriptor struct {
+ Revision byte
+ Sbz1 byte
+ Control uint16
+ Owner uintptr
+ Group uintptr
+ Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl
+ Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl
+}
+
+type ntStatus int32
+
+func (status ntStatus) Err() error {
+ if status >= 0 {
+ return nil
+ }
+ return rtlNtStatusToDosError(status)
+}
+
+var (
+ // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
+ ErrPipeListenerClosed = net.ErrClosed
+
+ errPipeWriteClosed = errors.New("pipe has been closed for write")
+)
+
+type win32Pipe struct {
+ *win32File
+ path string
+}
+
+var _ PipeConn = (*win32Pipe)(nil)
+
+type win32MessageBytePipe struct {
+ win32Pipe
+ writeClosed bool
+ readEOF bool
+}
+
+type pipeAddress string
+
+func (f *win32Pipe) LocalAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) RemoteAddr() net.Addr {
+ return pipeAddress(f.path)
+}
+
+func (f *win32Pipe) SetDeadline(t time.Time) error {
+ if err := f.SetReadDeadline(t); err != nil {
+ return err
+ }
+ return f.SetWriteDeadline(t)
+}
+
+func (f *win32Pipe) Disconnect() error {
+ return disconnectNamedPipe(f.win32File.handle)
+}
+
+// CloseWrite closes the write side of a message pipe in byte mode.
+func (f *win32MessageBytePipe) CloseWrite() error {
+ if f.writeClosed {
+ return errPipeWriteClosed
+ }
+ err := f.win32File.Flush()
+ if err != nil {
+ return err
+ }
+ _, err = f.win32File.Write(nil)
+ if err != nil {
+ return err
+ }
+ f.writeClosed = true
+ return nil
+}
+
+// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
+// they are used to implement CloseWrite().
+func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
+ if f.writeClosed {
+ return 0, errPipeWriteClosed
+ }
+ if len(b) == 0 {
+ return 0, nil
+ }
+ return f.win32File.Write(b)
+}
+
+// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
+// mode pipe will return io.EOF, as will all subsequent reads.
+func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
+ if f.readEOF {
+ return 0, io.EOF
+ }
+ n, err := f.win32File.Read(b)
+ if err == io.EOF { //nolint:errorlint
+ // If this was the result of a zero-byte read, then
+ // it is possible that the read was due to a zero-size
+ // message. Since we are simulating CloseWrite with a
+ // zero-byte message, ensure that all future Read() calls
+ // also return EOF.
+ f.readEOF = true
+ } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno
+ // ERROR_MORE_DATA indicates that the pipe's read mode is message mode
+ // and the message still has more bytes. Treat this as a success, since
+ // this package presents all named pipes as byte streams.
+ err = nil
+ }
+ return n, err
+}
+
+func (pipeAddress) Network() string {
+ return "pipe"
+}
+
+func (s pipeAddress) String() string {
+ return string(s)
+}
+
+// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) {
+ for {
+ select {
+ case <-ctx.Done():
+ return windows.Handle(0), ctx.Err()
+ default:
+ h, err := fs.CreateFile(*path,
+ access,
+ 0, // mode
+ nil, // security attributes
+ fs.OPEN_EXISTING,
+ fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel),
+ 0, // template file handle
+ )
+ if err == nil {
+ return h, nil
+ }
+ if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno
+ return h, &os.PathError{Err: err, Op: "open", Path: *path}
+ }
+ // Wait 10 msec and try again. This is a rather simplistic
+ // approach: we simply retry every 10 milliseconds.
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+}
+
+// DialPipe connects to a named pipe by path, timing out if the connection
+// takes longer than the specified duration. If timeout is nil, then we use
+// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
+ var absTimeout time.Time
+ if timeout != nil {
+ absTimeout = time.Now().Add(*timeout)
+ } else {
+ absTimeout = time.Now().Add(2 * time.Second)
+ }
+ ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
+ defer cancel()
+ conn, err := DialPipeContext(ctx, path)
+ if errors.Is(err, context.DeadlineExceeded) {
+ return nil, ErrTimeout
+ }
+ return conn, err
+}
+
+// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
+// cancellation or timeout.
+func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
+ return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE))
+}
+
+// PipeImpLevel is an enumeration of impersonation levels that may be set
+// when calling DialPipeAccessImpersonation.
+type PipeImpLevel uint32
+
+const (
+ PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS)
+ PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION)
+ PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION)
+ PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION)
+)
+
+// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx`
+// cancellation or timeout.
+func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
+ return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous)
+}
+
+// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with
+// `access` at `impLevel` until `ctx` cancellation or timeout. The other
+// DialPipe* implementations use PipeImpLevelAnonymous.
+func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) {
+ var err error
+ var h windows.Handle
+ h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel)
+ if err != nil {
+ return nil, err
+ }
+
+ var flags uint32
+ err = getNamedPipeInfo(h, &flags, nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := makeWin32File(h)
+ if err != nil {
+ windows.Close(h)
+ return nil, err
+ }
+
+ // If the pipe is in message mode, return a message byte pipe, which
+ // supports CloseWrite().
+ if flags&windows.PIPE_TYPE_MESSAGE != 0 {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: f, path: path},
+ }, nil
+ }
+ return &win32Pipe{win32File: f, path: path}, nil
+}
+
+type acceptResponse struct {
+ f *win32File
+ err error
+}
+
+type win32PipeListener struct {
+ firstHandle windows.Handle
+ path string
+ config PipeConfig
+ acceptCh chan (chan acceptResponse)
+ closeCh chan int
+ doneCh chan int
+}
+
+func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) {
+ path16, err := windows.UTF16FromString(path)
+ if err != nil {
+ return 0, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+
+ var oa objectAttributes
+ oa.Length = unsafe.Sizeof(oa)
+
+ var ntPath unicodeString
+ if err := rtlDosPathNameToNtPathName(&path16[0],
+ &ntPath,
+ 0,
+ 0,
+ ).Err(); err != nil {
+ return 0, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+ defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck
+ oa.ObjectName = &ntPath
+ oa.Attributes = windows.OBJ_CASE_INSENSITIVE
+
+ // The security descriptor is only needed for the first pipe.
+ if first {
+ if sd != nil {
+ //todo: does `sdb` need to be allocated on the heap, or can go allocate it?
+ l := uint32(len(sd))
+ sdb, err := windows.LocalAlloc(0, l)
+ if err != nil {
+ return 0, fmt.Errorf("LocalAlloc for security descriptor of length %d: %w", l, err)
+ }
+ defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck
+ copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
+ oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
+ } else {
+ // Construct the default named pipe security descriptor.
+ var dacl uintptr
+ if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
+ return 0, fmt.Errorf("getting default named pipe ACL: %w", err)
+ }
+ defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck
+
+ sdb := &securityDescriptor{
+ Revision: 1,
+ Control: windows.SE_DACL_PRESENT,
+ Dacl: dacl,
+ }
+ oa.SecurityDescriptor = sdb
+ }
+ }
+
+ typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS)
+ if c.MessageMode {
+ typ |= windows.FILE_PIPE_MESSAGE_TYPE
+ }
+
+ disposition := fs.FILE_OPEN
+ access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE
+ if first {
+ disposition = fs.FILE_CREATE
+ // By not asking for read or write access, the named pipe file system
+ // will put this pipe into an initially disconnected state, blocking
+ // client connections until the next call with first == false.
+ access = fs.SYNCHRONIZE
+ }
+
+ timeout := int64(-50 * 10000) // 50ms
+
+ var (
+ h windows.Handle
+ iosb ioStatusBlock
+ )
+ err = ntCreateNamedPipeFile(&h,
+ access,
+ &oa,
+ &iosb,
+ fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE,
+ disposition,
+ 0,
+ typ,
+ 0,
+ 0,
+ 0xffffffff,
+ uint32(c.InputBufferSize),
+ uint32(c.OutputBufferSize),
+ &timeout).Err()
+ if err != nil {
+ return 0, &os.PathError{Op: "open", Path: path, Err: err}
+ }
+
+ runtime.KeepAlive(ntPath)
+ return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+ h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
+ if err != nil {
+ return nil, err
+ }
+ f, err := makeWin32File(h)
+ if err != nil {
+ windows.Close(h)
+ return nil, err
+ }
+ return f, nil
+}
+
+func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
+ p, err := l.makeServerPipe()
+ if err != nil {
+ return nil, err
+ }
+
+ // Wait for the client to connect.
+ ch := make(chan error)
+ go func(p *win32File) {
+ ch <- connectPipe(p)
+ }(p)
+
+ select {
+ case err = <-ch:
+ if err != nil {
+ p.Close()
+ p = nil
+ }
+ case <-l.closeCh:
+ // Abort the connect request by closing the handle.
+ p.Close()
+ p = nil
+ err = <-ch
+ if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno
+ err = ErrPipeListenerClosed
+ }
+ }
+ return p, err
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+ closed := false
+ for !closed {
+ select {
+ case <-l.closeCh:
+ closed = true
+ case responseCh := <-l.acceptCh:
+ var (
+ p *win32File
+ err error
+ )
+ for {
+ p, err = l.makeConnectedServerPipe()
+ // If the connection was immediately closed by the client, try
+ // again.
+ if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno
+ break
+ }
+ }
+ responseCh <- acceptResponse{p, err}
+ closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno
+ }
+ }
+ windows.Close(l.firstHandle)
+ l.firstHandle = 0
+ // Notify Close() and Accept() callers that the handle has been closed.
+ close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct {
+ // SecurityDescriptor contains a Windows security descriptor in SDDL format.
+ SecurityDescriptor string
+
+ // MessageMode determines whether the pipe is in byte or message mode. In either
+ // case the pipe is read in byte mode by default. The only practical difference in
+ // this implementation is that CloseWrite() is only supported for message mode pipes;
+ // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
+ // transferred to the reader (and returned as io.EOF in this implementation)
+ // when the pipe is in message mode.
+ MessageMode bool
+
+ // InputBufferSize specifies the size of the input buffer, in bytes.
+ InputBufferSize int32
+
+ // OutputBufferSize specifies the size of the output buffer, in bytes.
+ OutputBufferSize int32
+}
+
+// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
+// The pipe must not already exist.
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+ var (
+ sd []byte
+ err error
+ )
+ if c == nil {
+ c = &PipeConfig{}
+ }
+ if c.SecurityDescriptor != "" {
+ sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+ if err != nil {
+ return nil, err
+ }
+ }
+ h, err := makeServerPipeHandle(path, sd, c, true)
+ if err != nil {
+ return nil, err
+ }
+ l := &win32PipeListener{
+ firstHandle: h,
+ path: path,
+ config: *c,
+ acceptCh: make(chan (chan acceptResponse)),
+ closeCh: make(chan int),
+ doneCh: make(chan int),
+ }
+ go l.listenerRoutine()
+ return l, nil
+}
+
+func connectPipe(p *win32File) error {
+ c, err := p.prepareIO()
+ if err != nil {
+ return err
+ }
+ defer p.wg.Done()
+
+ err = connectNamedPipe(p.handle, &c.o)
+ _, err = p.asyncIO(c, nil, 0, err)
+ if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno
+ return err
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Accept() (net.Conn, error) {
+ ch := make(chan acceptResponse)
+ select {
+ case l.acceptCh <- ch:
+ response := <-ch
+ err := response.err
+ if err != nil {
+ return nil, err
+ }
+ if l.config.MessageMode {
+ return &win32MessageBytePipe{
+ win32Pipe: win32Pipe{win32File: response.f, path: l.path},
+ }, nil
+ }
+ return &win32Pipe{win32File: response.f, path: l.path}, nil
+ case <-l.doneCh:
+ return nil, ErrPipeListenerClosed
+ }
+}
+
+func (l *win32PipeListener) Close() error {
+ select {
+ case l.closeCh <- 1:
+ <-l.doneCh
+ case <-l.doneCh:
+ }
+ return nil
+}
+
+func (l *win32PipeListener) Addr() net.Addr {
+ return pipeAddress(l.path)
+}
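
ListenPipe and DialPipe are the exported entry points of this file. Below is a minimal sketch of a round trip over a named pipe on a Windows host; the pipe name is illustrative and error handling is kept deliberately terse.

//go:build windows

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	const pipePath = `\\.\pipe\winio-example` // illustrative pipe name

	l, err := winio.ListenPipe(pipePath, nil) // nil PipeConfig: byte-mode pipe, default buffer sizes
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("hello over the pipe"))
	}()

	timeout := 2 * time.Second
	c, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	buf := make([]byte, 64)
	n, err := c.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n]))
}
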
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
new file mode 100644
index 00000000..48ce4e92
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -0,0 +1,232 @@
+// Package guid provides a GUID type. The backing structure for a GUID is
+// identical to that used by the golang.org/x/sys/windows GUID type.
+// There are two main binary encodings used for a GUID, the big-endian encoding,
+// and the Windows (mixed-endian) encoding. See here for details:
+// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
+package guid
+
+import (
+ "crypto/rand"
+ "crypto/sha1" //nolint:gosec // not used for secure application
+ "encoding"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment
+
+// Variant specifies the variant (or "type") of the GUID. It determines
+// how the entirety of the rest of the GUID is interpreted.
+type Variant uint8
+
+// The variants specified by RFC 4122 section 4.1.1.
+const (
+ // VariantUnknown specifies a GUID variant which does not conform to one of
+ // the variant encodings specified in RFC 4122.
+ VariantUnknown Variant = iota
+ VariantNCS
+ VariantRFC4122 // RFC 4122
+ VariantMicrosoft
+ VariantFuture
+)
+
+// Version specifies how the bits in the GUID were generated. For instance, a
+// version 4 GUID is randomly generated, and a version 5 is generated from the
+// hash of an input string.
+type Version uint8
+
+func (v Version) String() string {
+ return strconv.FormatUint(uint64(v), 10)
+}
+
+var _ = (encoding.TextMarshaler)(GUID{})
+var _ = (encoding.TextUnmarshaler)(&GUID{})
+
+// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
+func NewV4() (GUID, error) {
+ var b [16]byte
+ if _, err := rand.Read(b[:]); err != nil {
+ return GUID{}, err
+ }
+
+ g := FromArray(b)
+ g.setVersion(4) // Version 4 means randomly generated.
+ g.setVariant(VariantRFC4122)
+
+ return g, nil
+}
+
+// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
+// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
+// and the sample code treats it as a series of bytes, so we do the same here.
+//
+// Some implementations, such as those found on Windows, treat the name as a
+// big-endian UTF16 stream of bytes. If that is desired, the string can be
+// encoded as such before being passed to this function.
+func NewV5(namespace GUID, name []byte) (GUID, error) {
+ b := sha1.New() //nolint:gosec // not used for secure application
+ namespaceBytes := namespace.ToArray()
+ b.Write(namespaceBytes[:])
+ b.Write(name)
+
+ a := [16]byte{}
+ copy(a[:], b.Sum(nil))
+
+ g := FromArray(a)
+ g.setVersion(5) // Version 5 means generated from a string.
+ g.setVariant(VariantRFC4122)
+
+ return g, nil
+}
+
+func fromArray(b [16]byte, order binary.ByteOrder) GUID {
+ var g GUID
+ g.Data1 = order.Uint32(b[0:4])
+ g.Data2 = order.Uint16(b[4:6])
+ g.Data3 = order.Uint16(b[6:8])
+ copy(g.Data4[:], b[8:16])
+ return g
+}
+
+func (g GUID) toArray(order binary.ByteOrder) [16]byte {
+ b := [16]byte{}
+ order.PutUint32(b[0:4], g.Data1)
+ order.PutUint16(b[4:6], g.Data2)
+ order.PutUint16(b[6:8], g.Data3)
+ copy(b[8:16], g.Data4[:])
+ return b
+}
+
+// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
+func FromArray(b [16]byte) GUID {
+ return fromArray(b, binary.BigEndian)
+}
+
+// ToArray returns an array of 16 bytes representing the GUID in big-endian
+// encoding.
+func (g GUID) ToArray() [16]byte {
+ return g.toArray(binary.BigEndian)
+}
+
+// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
+func FromWindowsArray(b [16]byte) GUID {
+ return fromArray(b, binary.LittleEndian)
+}
+
+// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
+// encoding.
+func (g GUID) ToWindowsArray() [16]byte {
+ return g.toArray(binary.LittleEndian)
+}
+
+func (g GUID) String() string {
+ return fmt.Sprintf(
+ "%08x-%04x-%04x-%04x-%012x",
+ g.Data1,
+ g.Data2,
+ g.Data3,
+ g.Data4[:2],
+ g.Data4[2:])
+}
+
+// FromString parses a string containing a GUID and returns the GUID. The only
+// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
+// format.
+func FromString(s string) (GUID, error) {
+ if len(s) != 36 {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+
+ var g GUID
+
+ data1, err := strconv.ParseUint(s[0:8], 16, 32)
+ if err != nil {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+ g.Data1 = uint32(data1)
+
+ data2, err := strconv.ParseUint(s[9:13], 16, 16)
+ if err != nil {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+ g.Data2 = uint16(data2)
+
+ data3, err := strconv.ParseUint(s[14:18], 16, 16)
+ if err != nil {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+ g.Data3 = uint16(data3)
+
+ for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
+ v, err := strconv.ParseUint(s[x:x+2], 16, 8)
+ if err != nil {
+ return GUID{}, fmt.Errorf("invalid GUID %q", s)
+ }
+ g.Data4[i] = uint8(v)
+ }
+
+ return g, nil
+}
+
+func (g *GUID) setVariant(v Variant) {
+ d := g.Data4[0]
+ switch v {
+ case VariantNCS:
+ d = (d & 0x7f)
+ case VariantRFC4122:
+ d = (d & 0x3f) | 0x80
+ case VariantMicrosoft:
+ d = (d & 0x1f) | 0xc0
+ case VariantFuture:
+ d = (d & 0x0f) | 0xe0
+ case VariantUnknown:
+ fallthrough
+ default:
+ panic(fmt.Sprintf("invalid variant: %d", v))
+ }
+ g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+ b := g.Data4[0]
+ if b&0x80 == 0 {
+ return VariantNCS
+ } else if b&0xc0 == 0x80 {
+ return VariantRFC4122
+ } else if b&0xe0 == 0xc0 {
+ return VariantMicrosoft
+ } else if b&0xe0 == 0xe0 {
+ return VariantFuture
+ }
+ return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+ g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+ return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+ return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error {
+ g2, err := FromString(string(text))
+ if err != nil {
+ return err
+ }
+ *g = g2
+ return nil
+}
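
The guid package is exported and builds on any platform. A short sketch of its main entry points; the parsed GUID value below is just an example string:

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Version 4: randomly generated.
	g, err := guid.NewV4()
	if err != nil {
		panic(err)
	}
	fmt.Println(g, "version:", g.Version(), "variant:", g.Variant())

	// Parse the canonical textual form and show the Windows (mixed-endian) encoding.
	p, err := guid.FromString("25a207b9-ddf3-4660-8ee9-76e58c74063e")
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", p.ToWindowsArray())
}
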
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 00000000..805bd354
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,16 @@
+//go:build !windows
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type because golang.org/x/sys/windows.GUID is only
+// available in builds targeting `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
new file mode 100644
index 00000000..27e45ee5
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
@@ -0,0 +1,13 @@
+//go:build windows
+// +build windows
+
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
new file mode 100644
index 00000000..4076d313
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go
@@ -0,0 +1,27 @@
+// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT.
+
+package guid
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VariantUnknown-0]
+ _ = x[VariantNCS-1]
+ _ = x[VariantRFC4122-2]
+ _ = x[VariantMicrosoft-3]
+ _ = x[VariantFuture-4]
+}
+
+const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture"
+
+var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33}
+
+func (i Variant) String() string {
+ if i >= Variant(len(_Variant_index)-1) {
+ return "Variant(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Variant_name[_Variant_index[i]:_Variant_index[i+1]]
+}
diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go
new file mode 100644
index 00000000..d9b90b6e
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/privilege.go
@@ -0,0 +1,196 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "runtime"
+ "sync"
+ "unicode/utf16"
+
+ "golang.org/x/sys/windows"
+)
+
+//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
+//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
+//sys revertToSelf() (err error) = advapi32.RevertToSelf
+//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
+//sys getCurrentThread() (h windows.Handle) = GetCurrentThread
+//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
+//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
+//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
+
+const (
+ //revive:disable-next-line:var-naming ALL_CAPS
+ SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED
+
+ //revive:disable-next-line:var-naming ALL_CAPS
+ ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED
+
+ SeBackupPrivilege = "SeBackupPrivilege"
+ SeRestorePrivilege = "SeRestorePrivilege"
+ SeSecurityPrivilege = "SeSecurityPrivilege"
+)
+
+var (
+ privNames = make(map[string]uint64)
+ privNameMutex sync.Mutex
+)
+
+// PrivilegeError represents an error enabling privileges.
+type PrivilegeError struct {
+ privileges []uint64
+}
+
+func (e *PrivilegeError) Error() string {
+ s := "Could not enable privilege "
+ if len(e.privileges) > 1 {
+ s = "Could not enable privileges "
+ }
+ for i, p := range e.privileges {
+ if i != 0 {
+ s += ", "
+ }
+ s += `"`
+ s += getPrivilegeName(p)
+ s += `"`
+ }
+ return s
+}
+
+// RunWithPrivilege enables a single privilege for a function call.
+func RunWithPrivilege(name string, fn func() error) error {
+ return RunWithPrivileges([]string{name}, fn)
+}
+
+// RunWithPrivileges enables privileges for a function call.
+func RunWithPrivileges(names []string, fn func() error) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ token, err := newThreadToken()
+ if err != nil {
+ return err
+ }
+ defer releaseThreadToken(token)
+ err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
+ if err != nil {
+ return err
+ }
+ return fn()
+}
+
+func mapPrivileges(names []string) ([]uint64, error) {
+ privileges := make([]uint64, 0, len(names))
+ privNameMutex.Lock()
+ defer privNameMutex.Unlock()
+ for _, name := range names {
+ p, ok := privNames[name]
+ if !ok {
+ err := lookupPrivilegeValue("", name, &p)
+ if err != nil {
+ return nil, err
+ }
+ privNames[name] = p
+ }
+ privileges = append(privileges, p)
+ }
+ return privileges, nil
+}
+
+// EnableProcessPrivileges enables privileges globally for the process.
+func EnableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+ return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+ privileges, err := mapPrivileges(names)
+ if err != nil {
+ return err
+ }
+
+ p := windows.CurrentProcess()
+ var token windows.Token
+ err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+ if err != nil {
+ return err
+ }
+
+ defer token.Close()
+ return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+ var b bytes.Buffer
+ _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+ for _, p := range privileges {
+ _ = binary.Write(&b, binary.LittleEndian, p)
+ _ = binary.Write(&b, binary.LittleEndian, action)
+ }
+ prevState := make([]byte, b.Len())
+ reqSize := uint32(0)
+ success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+ if !success {
+ return err
+ }
+ if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno
+ return &PrivilegeError{privileges}
+ }
+ return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+ var nameBuffer [256]uint16
+ bufSize := uint32(len(nameBuffer))
+ err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %d>", luid)
+ }
+
+ var displayNameBuffer [256]uint16
+ displayBufSize := uint32(len(displayNameBuffer))
+ var langID uint32
+ err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+ if err != nil {
+ return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+ }
+
+ return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+ err := impersonateSelf(windows.SecurityImpersonation)
+ if err != nil {
+ return 0, err
+ }
+
+ var token windows.Token
+ err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token)
+ if err != nil {
+ rerr := revertToSelf()
+ if rerr != nil {
+ panic(rerr)
+ }
+ return 0, err
+ }
+ return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+ err := revertToSelf()
+ if err != nil {
+ panic(err)
+ }
+ h.Close()
+}
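
RunWithPrivilege enables a privilege only for the duration of the callback, on the current OS thread. A minimal sketch of the call shape; the process token must already hold the privilege (as an elevated administrator's does), and actually reading ACL-protected data would additionally require opening files with backup semantics, which this snippet does not attempt.

//go:build windows

package main

import (
	"fmt"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// RunWithPrivilege locks the goroutine to its OS thread, impersonates self,
	// enables SeBackupPrivilege on the thread token, and runs the callback.
	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
		fmt.Println("SeBackupPrivilege is enabled on this thread")
		return nil
	})
	if err != nil {
		fmt.Println("could not enable privilege:", err)
	}
}
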
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 00000000..67d1a104
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,131 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "strings"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ reparseTagMountPoint = 0xA0000003
+ reparseTagSymlink = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+ ReparseTag uint32
+ ReparseDataLength uint16
+ Reserved uint16
+ SubstituteNameOffset uint16
+ SubstituteNameLength uint16
+ PrintNameOffset uint16
+ PrintNameLength uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+ Target string
+ IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a reparse point that is
+// neither a symlink nor a mount point.
+type UnsupportedReparsePointError struct {
+ Tag uint32
+}
+
+func (e *UnsupportedReparsePointError) Error() string {
+ return fmt.Sprintf("unsupported reparse point %x", e.Tag)
+}
+
+// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
+// or a mount point.
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) {
+ tag := binary.LittleEndian.Uint32(b[0:4])
+ return DecodeReparsePointData(tag, b[8:])
+}
+
+func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
+ isMountPoint := false
+ switch tag {
+ case reparseTagMountPoint:
+ isMountPoint = true
+ case reparseTagSymlink:
+ default:
+ return nil, &UnsupportedReparsePointError{tag}
+ }
+ nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
+ if !isMountPoint {
+ nameOffset += 4
+ }
+ nameLength := binary.LittleEndian.Uint16(b[6:8])
+ name := make([]uint16, nameLength/2)
+ err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
+ if err != nil {
+ return nil, err
+ }
+ return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
+}
+
+func isDriveLetter(c byte) bool {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
+// mount point.
+func EncodeReparsePoint(rp *ReparsePoint) []byte {
+ // Generate an NT path and determine if this is a relative path.
+ var ntTarget string
+ relative := false
+ if strings.HasPrefix(rp.Target, `\\?\`) {
+ ntTarget = `\??\` + rp.Target[4:]
+ } else if strings.HasPrefix(rp.Target, `\\`) {
+ ntTarget = `\??\UNC\` + rp.Target[2:]
+ } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
+ ntTarget = `\??\` + rp.Target
+ } else {
+ ntTarget = rp.Target
+ relative = true
+ }
+
+ // The paths must be NUL-terminated even though they are counted strings.
+ target16 := utf16.Encode([]rune(rp.Target + "\x00"))
+ ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))
+
+ size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
+ size += len(ntTarget16)*2 + len(target16)*2
+
+ tag := uint32(reparseTagMountPoint)
+ if !rp.IsMountPoint {
+ tag = reparseTagSymlink
+ size += 4 // Add room for symlink flags
+ }
+
+ data := reparseDataBuffer{
+ ReparseTag: tag,
+ ReparseDataLength: uint16(size),
+ SubstituteNameOffset: 0,
+ SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
+ PrintNameOffset: uint16(len(ntTarget16) * 2),
+ PrintNameLength: uint16((len(target16) - 1) * 2),
+ }
+
+ var b bytes.Buffer
+ _ = binary.Write(&b, binary.LittleEndian, &data)
+ if !rp.IsMountPoint {
+ flags := uint32(0)
+ if relative {
+ flags |= 1
+ }
+ _ = binary.Write(&b, binary.LittleEndian, flags)
+ }
+
+ _ = binary.Write(&b, binary.LittleEndian, ntTarget16)
+ _ = binary.Write(&b, binary.LittleEndian, target16)
+ return b.Bytes()
+}
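
Because EncodeReparsePoint and DecodeReparsePoint only marshal the REPARSE_DATA_BUFFER layout in memory, they round-trip without touching the filesystem. A minimal sketch (the target path is illustrative):

//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	rp := &winio.ReparsePoint{Target: `C:\Data\share`, IsMountPoint: false}

	// Encode writes the tag, lengths, and the NT + print forms of the target.
	buf := winio.EncodeReparsePoint(rp)

	// Decode reads the print name back out of the buffer.
	decoded, err := winio.DecodeReparsePoint(buf)
	if err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("target=%q mountPoint=%v\n", decoded.Target, decoded.IsMountPoint)
}
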
diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go
new file mode 100644
index 00000000..c3685e98
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/sd.go
@@ -0,0 +1,133 @@
+//go:build windows
+// +build windows
+
+package winio
+
+import (
+ "errors"
+ "fmt"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
+//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW
+//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW
+
+type AccountLookupError struct {
+ Name string
+ Err error
+}
+
+func (e *AccountLookupError) Error() string {
+ if e.Name == "" {
+ return "lookup account: empty account name specified"
+ }
+ var s string
+ switch {
+ case errors.Is(e.Err, windows.ERROR_INVALID_SID):
+ s = "the security ID structure is invalid"
+ case errors.Is(e.Err, windows.ERROR_NONE_MAPPED):
+ s = "not found"
+ default:
+ s = e.Err.Error()
+ }
+ return "lookup account " + e.Name + ": " + s
+}
+
+func (e *AccountLookupError) Unwrap() error { return e.Err }
+
+type SddlConversionError struct {
+ Sddl string
+ Err error
+}
+
+func (e *SddlConversionError) Error() string {
+ return "convert " + e.Sddl + ": " + e.Err.Error()
+}
+
+func (e *SddlConversionError) Unwrap() error { return e.Err }
+
+// LookupSidByName looks up the SID of an account by name
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupSidByName(name string) (sid string, err error) {
+ if name == "" {
+ return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED}
+ }
+
+ var sidSize, sidNameUse, refDomainSize uint32
+ err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
+ return "", &AccountLookupError{name, err}
+ }
+ sidBuffer := make([]byte, sidSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ var strBuffer *uint16
+ err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
+ if err != nil {
+ return "", &AccountLookupError{name, err}
+ }
+ sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+ _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer)))
+ return sid, nil
+}
+
+// LookupNameBySid looks up the name of an account by SID
+//
+//revive:disable-next-line:var-naming SID, not Sid
+func LookupNameBySid(sid string) (name string, err error) {
+ if sid == "" {
+ return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED}
+ }
+
+ sidBuffer, err := windows.UTF16PtrFromString(sid)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ var sidPtr *byte
+ if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+ defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck
+
+ var nameSize, refDomainSize, sidNameUse uint32
+ err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse)
+ if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno
+ return "", &AccountLookupError{sid, err}
+ }
+
+ nameBuffer := make([]uint16, nameSize)
+ refDomainBuffer := make([]uint16, refDomainSize)
+ err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
+ if err != nil {
+ return "", &AccountLookupError{sid, err}
+ }
+
+ name = windows.UTF16ToString(nameBuffer)
+ return name, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+ sd, err := windows.SecurityDescriptorFromString(sddl)
+ if err != nil {
+ return nil, &SddlConversionError{Sddl: sddl, Err: err}
+ }
+ b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+ return b, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+ if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l {
+ return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE)
+ }
+ s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0]))
+ return s.String(), nil
+}
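
A short usage sketch for the SID and SDDL helpers above; the account name and SDDL string are examples, and the calls are Windows-only:

//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Resolve a well-known account name to its string SID.
	sid, err := winio.LookupSidByName("Everyone")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("Everyone =", sid)

	// Convert an SDDL string into a self-relative security descriptor and back.
	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;WD)")
	if err != nil {
		fmt.Println("sddl conversion failed:", err)
		return
	}
	sddl, err := winio.SecurityDescriptorToSddl(sd)
	if err != nil {
		fmt.Println("descriptor conversion failed:", err)
		return
	}
	fmt.Println("round-tripped SDDL:", sddl)
}
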
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
new file mode 100644
index 00000000..a6ca111b
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -0,0 +1,5 @@
+//go:build windows
+
+package winio
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
new file mode 100644
index 00000000..89b66eda
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -0,0 +1,378 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package winio
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ return e
+}
+
+var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modntdll = windows.NewLazySystemDLL("ntdll.dll")
+ modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
+
+ procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges")
+ procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
+ procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW")
+ procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf")
+ procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
+ procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW")
+ procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
+ procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW")
+ procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW")
+ procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken")
+ procRevertToSelf = modadvapi32.NewProc("RevertToSelf")
+ procBackupRead = modkernel32.NewProc("BackupRead")
+ procBackupWrite = modkernel32.NewProc("BackupWrite")
+ procCancelIoEx = modkernel32.NewProc("CancelIoEx")
+ procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
+ procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
+ procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
+ procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe")
+ procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
+ procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
+ procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
+ procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
+ procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
+ procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
+ procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
+ procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
+ procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+ procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
+)
+
+func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
+ var _p0 uint32
+ if releaseAll {
+ _p0 = 1
+ }
+ r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
+ success = r0 != 0
+ if true {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func convertSidToStringSid(sid *byte, str **uint16) (err error) {
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func convertStringSidToSid(str *uint16, sid **byte) (err error) {
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func impersonateSelf(level uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(accountName)
+ if err != nil {
+ return
+ }
+ return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse)
+}
+
+func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
+}
+
+func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeName(_p0, luid, buffer, size)
+}
+
+func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(systemName)
+ if err != nil {
+ return
+ }
+ var _p1 *uint16
+ _p1, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _lookupPrivilegeValue(_p0, _p1, luid)
+}
+
+func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
+ var _p0 uint32
+ if openAsSelf {
+ _p0 = 1
+ }
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func revertToSelf() (err error) {
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ }
+ r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
+ var _p0 *byte
+ if len(b) > 0 {
+ _p0 = &b[0]
+ }
+ var _p1 uint32
+ if abort {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if processSecurity {
+ _p2 = 1
+ }
+ r1, _, e1 := syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) {
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) {
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount))
+ newport = windows.Handle(r0)
+ if newport == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
+ var _p0 *uint16
+ _p0, err = syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return
+ }
+ return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa)
+}
+
+func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) {
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
+ handle = windows.Handle(r0)
+ if handle == windows.InvalidHandle {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func disconnectNamedPipe(pipe windows.Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getCurrentThread() (h windows.Handle) {
+ r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr())
+ h = windows.Handle(r0)
+ return
+}
+
+func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) {
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
+ status = ntStatus(r0)
+ return
+}
+
+func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl)))
+ status = ntStatus(r0)
+ return
+}
+
+func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) {
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved))
+ status = ntStatus(r0)
+ return
+}
+
+func rtlNtStatusToDosError(status ntStatus) (winerr error) {
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status))
+ if r0 != 0 {
+ winerr = syscall.Errno(r0)
+ }
+ return
+}
+
+func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+ var _p0 uint32
+ if wait {
+ _p0 = 1
+ }
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go
index 19229226..45642ad3 100644
--- a/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go
+++ b/vendor/github.com/conductorone/baton-sdk/internal/connector/connector.go
@@ -35,6 +35,7 @@ const listenerFdEnv = "BATON_CONNECTOR_SERVICE_LISTENER_FD"
type connectorClient struct {
connectorV2.ResourceTypesServiceClient
connectorV2.ResourcesServiceClient
+ connectorV2.ResourceGetterServiceClient
connectorV2.EntitlementsServiceClient
connectorV2.GrantsServiceClient
connectorV2.ConnectorServiceClient
@@ -55,13 +56,14 @@ var ErrConnectorNotImplemented = errors.New("client does not implement connector
type wrapper struct {
mtx sync.RWMutex
- server types.ConnectorServer
- client types.ConnectorClient
- serverStdin io.WriteCloser
- conn *grpc.ClientConn
- provisioningEnabled bool
- ticketingEnabled bool
- fullSyncDisabled bool
+ server types.ConnectorServer
+ client types.ConnectorClient
+ serverStdin io.WriteCloser
+ conn *grpc.ClientConn
+ provisioningEnabled bool
+ ticketingEnabled bool
+ fullSyncDisabled bool
+ targetedSyncResourceIDs []string
rateLimiter ratelimitV1.RateLimiterServiceServer
rlCfg *ratelimitV1.RateLimiterConfig
@@ -115,6 +117,13 @@ func WithTicketingEnabled() Option {
}
}
+func WithTargetedSyncResourceIDs(resourceIDs []string) Option {
+ return func(ctx context.Context, w *wrapper) error {
+ w.targetedSyncResourceIDs = resourceIDs
+ return nil
+ }
+}
+
// NewConnectorWrapper returns a connector wrapper for running connector services locally.
func NewWrapper(ctx context.Context, server interface{}, opts ...Option) (*wrapper, error) {
connectorServer, isServer := server.(types.ConnectorServer)
@@ -372,6 +381,7 @@ func Register(ctx context.Context, s grpc.ServiceRegistrar, srv types.ConnectorS
connectorV2.RegisterResourceTypesServiceServer(s, srv)
connectorV2.RegisterAssetServiceServer(s, srv)
connectorV2.RegisterEventServiceServer(s, srv)
+ connectorV2.RegisterResourceGetterServiceServer(s, srv)
if opts.TicketingEnabled {
connectorV2.RegisterTicketsServiceServer(s, srv)
@@ -421,5 +431,6 @@ func NewConnectorClient(ctx context.Context, cc grpc.ClientConnInterface) types.
EventServiceClient: connectorV2.NewEventServiceClient(cc),
TicketsServiceClient: connectorV2.NewTicketsServiceClient(cc),
ActionServiceClient: connectorV2.NewActionServiceClient(cc),
+ ResourceGetterServiceClient: connectorV2.NewResourceGetterServiceClient(cc),
}
}
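
The new WithTargetedSyncResourceIDs option follows the wrapper's existing functional-option pattern. Since internal/connector cannot be imported from outside baton-sdk, the sketch below uses simplified stand-in types purely to illustrate how such an option is applied:

package main

import (
	"context"
	"fmt"
)

type wrapper struct {
	targetedSyncResourceIDs []string
}

type Option func(ctx context.Context, w *wrapper) error

// WithTargetedSyncResourceIDs records the resource IDs a targeted sync should
// be limited to, mirroring the option added in the diff above.
func WithTargetedSyncResourceIDs(resourceIDs []string) Option {
	return func(ctx context.Context, w *wrapper) error {
		w.targetedSyncResourceIDs = resourceIDs
		return nil
	}
}

func newWrapper(ctx context.Context, opts ...Option) (*wrapper, error) {
	w := &wrapper{}
	for _, opt := range opts {
		if err := opt(ctx, w); err != nil {
			return nil, err
		}
	}
	return w, nil
}

func main() {
	w, err := newWrapper(context.Background(),
		WithTargetedSyncResourceIDs([]string{"database:app", "role:analyst"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(w.targetedSyncResourceIDs)
}
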
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go
new file mode 100644
index 00000000..72063239
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.go
@@ -0,0 +1,130 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.4
+// protoc (unknown)
+// source: c1/connector/v2/annotation_raw_id.proto
+
+package v2
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Raw ID from whatever API the resource/entitlement/grant came from.
+type RawId struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *RawId) Reset() {
+ *x = RawId{}
+ mi := &file_c1_connector_v2_annotation_raw_id_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *RawId) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RawId) ProtoMessage() {}
+
+func (x *RawId) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_annotation_raw_id_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RawId.ProtoReflect.Descriptor instead.
+func (*RawId) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_annotation_raw_id_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RawId) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+var File_c1_connector_v2_annotation_raw_id_proto protoreflect.FileDescriptor
+
+var file_c1_connector_v2_annotation_raw_id_proto_rawDesc = string([]byte{
+ 0x0a, 0x27, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76,
+ 0x32, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x77,
+ 0x5f, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x22, 0x17, 0x0a, 0x05, 0x52, 0x61,
+ 0x77, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x02, 0x69, 0x64, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62,
+ 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+})
+
+var (
+ file_c1_connector_v2_annotation_raw_id_proto_rawDescOnce sync.Once
+ file_c1_connector_v2_annotation_raw_id_proto_rawDescData []byte
+)
+
+func file_c1_connector_v2_annotation_raw_id_proto_rawDescGZIP() []byte {
+ file_c1_connector_v2_annotation_raw_id_proto_rawDescOnce.Do(func() {
+ file_c1_connector_v2_annotation_raw_id_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_raw_id_proto_rawDesc), len(file_c1_connector_v2_annotation_raw_id_proto_rawDesc)))
+ })
+ return file_c1_connector_v2_annotation_raw_id_proto_rawDescData
+}
+
+var file_c1_connector_v2_annotation_raw_id_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_c1_connector_v2_annotation_raw_id_proto_goTypes = []any{
+ (*RawId)(nil), // 0: c1.connector.v2.RawId
+}
+var file_c1_connector_v2_annotation_raw_id_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_c1_connector_v2_annotation_raw_id_proto_init() }
+func file_c1_connector_v2_annotation_raw_id_proto_init() {
+ if File_c1_connector_v2_annotation_raw_id_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_annotation_raw_id_proto_rawDesc), len(file_c1_connector_v2_annotation_raw_id_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_c1_connector_v2_annotation_raw_id_proto_goTypes,
+ DependencyIndexes: file_c1_connector_v2_annotation_raw_id_proto_depIdxs,
+ MessageInfos: file_c1_connector_v2_annotation_raw_id_proto_msgTypes,
+ }.Build()
+ File_c1_connector_v2_annotation_raw_id_proto = out.File
+ file_c1_connector_v2_annotation_raw_id_proto_goTypes = nil
+ file_c1_connector_v2_annotation_raw_id_proto_depIdxs = nil
+}
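
Connector annotations travel as anypb.Any values, so the new RawId message is attached and read back the same way as the other annotation types. A small sketch (the package alias and ID value are illustrative):

package main

import (
	"fmt"

	connectorV2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"google.golang.org/protobuf/types/known/anypb"
)

func main() {
	raw := &connectorV2.RawId{Id: "16394"} // raw ID from the upstream API

	anno, err := anypb.New(raw)
	if err != nil {
		panic(err)
	}

	// Round-trip: unpack the annotation back into a RawId.
	decoded := &connectorV2.RawId{}
	if err := anno.UnmarshalTo(decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetId())
}
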
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.validate.go
new file mode 100644
index 00000000..f2bb5688
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/annotation_raw_id.pb.validate.go
@@ -0,0 +1,136 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: c1/connector/v2/annotation_raw_id.proto
+
+package v2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on RawId with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RawId) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RawId with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in RawIdMultiError, or nil if none found.
+func (m *RawId) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RawId) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Id
+
+ if len(errors) > 0 {
+ return RawIdMultiError(errors)
+ }
+
+ return nil
+}
+
+// RawIdMultiError is an error wrapping multiple validation errors returned by
+// RawId.ValidateAll() if the designated constraints aren't met.
+type RawIdMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RawIdMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RawIdMultiError) AllErrors() []error { return m }
+
+// RawIdValidationError is the validation error returned by RawId.Validate if
+// the designated constraints aren't met.
+type RawIdValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RawIdValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RawIdValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RawIdValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RawIdValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RawIdValidationError) ErrorName() string { return "RawIdValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RawIdValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRawId.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RawIdValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RawIdValidationError{}
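
The generated validator exposes the usual protoc-gen-validate pair: Validate stops at the first violation, while ValidateAll gathers everything into RawIdMultiError. RawId declares no field rules, so both return nil; the sketch below only shows the call surface:

package main

import (
	"fmt"

	connectorV2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	m := &connectorV2.RawId{Id: "external-123"}

	if err := m.Validate(); err != nil {
		fmt.Println("first violation:", err)
		return
	}
	if err := m.ValidateAll(); err != nil {
		fmt.Println("all violations:", err)
		return
	}
	fmt.Println("valid")
}
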
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go
index 51436ad9..3027d370 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/connector.pb.go
@@ -38,6 +38,8 @@ const (
Capability_CAPABILITY_RESOURCE_DELETE Capability = 8
Capability_CAPABILITY_SYNC_SECRETS Capability = 9
Capability_CAPABILITY_ACTIONS Capability = 10
+ Capability_CAPABILITY_TARGETED_SYNC Capability = 11
+ Capability_CAPABILITY_EVENT_FEED_V2 Capability = 12
)
// Enum value maps for Capability.
@@ -54,6 +56,8 @@ var (
8: "CAPABILITY_RESOURCE_DELETE",
9: "CAPABILITY_SYNC_SECRETS",
10: "CAPABILITY_ACTIONS",
+ 11: "CAPABILITY_TARGETED_SYNC",
+ 12: "CAPABILITY_EVENT_FEED_V2",
}
Capability_value = map[string]int32{
"CAPABILITY_UNSPECIFIED": 0,
@@ -67,6 +71,8 @@ var (
"CAPABILITY_RESOURCE_DELETE": 8,
"CAPABILITY_SYNC_SECRETS": 9,
"CAPABILITY_ACTIONS": 10,
+ "CAPABILITY_TARGETED_SYNC": 11,
+ "CAPABILITY_EVENT_FEED_V2": 12,
}
)
@@ -1470,7 +1476,7 @@ var file_c1_connector_v2_connector_proto_rawDesc = string([]byte{
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
0x69, 0x65, 0x6c, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a,
- 0xca, 0x02, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x1a,
+ 0x86, 0x03, 0x0a, 0x0a, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x1a,
0x0a, 0x16, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53,
0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x41,
0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x53, 0x49,
@@ -1490,49 +1496,53 @@ var file_c1_connector_v2_connector_proto_rawDesc = string([]byte{
0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x08, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x41, 0x50, 0x41, 0x42,
0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x43, 0x52, 0x45,
0x54, 0x53, 0x10, 0x09, 0x12, 0x16, 0x0a, 0x12, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49,
- 0x54, 0x59, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x0a, 0x2a, 0xf2, 0x01, 0x0a,
- 0x20, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69,
- 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f,
- 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41,
- 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
- 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49,
- 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44,
- 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f,
- 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x37, 0x0a, 0x33, 0x43,
- 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c,
- 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
- 0x52, 0x44, 0x10, 0x02, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49,
- 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e,
- 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x53, 0x4f, 0x10,
- 0x03, 0x32, 0xeb, 0x02, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x31, 0x2e,
+ 0x54, 0x59, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x0a, 0x12, 0x1c, 0x0a, 0x18,
+ 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45,
+ 0x54, 0x45, 0x44, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x0b, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x41,
+ 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x46,
+ 0x45, 0x45, 0x44, 0x5f, 0x56, 0x32, 0x10, 0x0c, 0x2a, 0xf2, 0x01, 0x0a, 0x20, 0x43, 0x61, 0x70,
+ 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a,
+ 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41,
+ 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59,
+ 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49,
+ 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x5f, 0x50, 0x41, 0x53,
+ 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x37, 0x0a, 0x33, 0x43, 0x41, 0x50, 0x41, 0x42,
+ 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45,
+ 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52,
+ 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x02,
+ 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x41, 0x50, 0x41, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44,
+ 0x45, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c,
+ 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x53, 0x4f, 0x10, 0x03, 0x32, 0xeb, 0x02,
+ 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x12, 0x78, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x33, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x08,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e,
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x6f, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x30, 0x2e, 0x63,
- 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31,
- 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32,
- 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x6c, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2f, 0x2e, 0x63,
- 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43,
- 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
- 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e,
- 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e,
- 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
- 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
- 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e,
- 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a,
+ 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x12, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x65, 0x61, 0x6e,
+ 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x65, 0x61,
+ 0x6e, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63,
+ 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b,
+ 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
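
The two enum values added here, CAPABILITY_TARGETED_SYNC and CAPABILITY_EVENT_FEED_V2, are advertised by the SDK when a connector implements the corresponding interfaces; capability wiring itself happens inside baton-sdk. The sketch below only shows that the new values resolve and stringify through the generated enum:

package main

import (
	"fmt"

	connectorV2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

func main() {
	caps := []connectorV2.Capability{
		connectorV2.Capability_CAPABILITY_TARGETED_SYNC,
		connectorV2.Capability_CAPABILITY_EVENT_FEED_V2,
	}
	for _, c := range caps {
		// Generated protobuf enums expose String() for the value name.
		fmt.Printf("%d %s\n", int32(c), c.String())
	}
}
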
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go
index aa8b9b60..0da9efb1 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.go
@@ -24,13 +24,65 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
+type EventType int32
+
+const (
+ EventType_EVENT_TYPE_UNSPECIFIED EventType = 0
+ EventType_EVENT_TYPE_USAGE EventType = 1
+ EventType_EVENT_TYPE_RESOURCE_CHANGE EventType = 4
+)
+
+// Enum value maps for EventType.
+var (
+ EventType_name = map[int32]string{
+ 0: "EVENT_TYPE_UNSPECIFIED",
+ 1: "EVENT_TYPE_USAGE",
+ 4: "EVENT_TYPE_RESOURCE_CHANGE",
+ }
+ EventType_value = map[string]int32{
+ "EVENT_TYPE_UNSPECIFIED": 0,
+ "EVENT_TYPE_USAGE": 1,
+ "EVENT_TYPE_RESOURCE_CHANGE": 4,
+ }
+)
+
+func (x EventType) Enum() *EventType {
+ p := new(EventType)
+ *p = x
+ return p
+}
+
+func (x EventType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (EventType) Descriptor() protoreflect.EnumDescriptor {
+ return file_c1_connector_v2_event_feed_proto_enumTypes[0].Descriptor()
+}
+
+func (EventType) Type() protoreflect.EnumType {
+ return &file_c1_connector_v2_event_feed_proto_enumTypes[0]
+}
+
+func (x EventType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use EventType.Descriptor instead.
+func (EventType) EnumDescriptor() ([]byte, []int) {
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{0}
+}
+
type ListEventsRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Can function like a page token but also can be arbitrary to resume a feed at any point
- Cursor string `protobuf:"bytes,1,opt,name=cursor,proto3" json:"cursor,omitempty"`
- StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"`
- PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ Cursor string `protobuf:"bytes,1,opt,name=cursor,proto3" json:"cursor,omitempty"`
+ StartAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_at,json=startAt,proto3" json:"start_at,omitempty"`
+ PageSize uint32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ // Used to specify a specific event feed to list events from.
+ // If not provided, the connector will use the old event feed.
+ EventFeedId string `protobuf:"bytes,5,opt,name=event_feed_id,json=eventFeedId,proto3" json:"event_feed_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -93,6 +145,13 @@ func (x *ListEventsRequest) GetAnnotations() []*anypb.Any {
return nil
}
+func (x *ListEventsRequest) GetEventFeedId() string {
+ if x != nil {
+ return x.EventFeedId
+ }
+ return ""
+}
+
type ListEventsResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Events []*Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"`
@@ -161,6 +220,102 @@ func (x *ListEventsResponse) GetAnnotations() []*anypb.Any {
return nil
}
+type ListEventFeedsRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListEventFeedsRequest) Reset() {
+ *x = ListEventFeedsRequest{}
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListEventFeedsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListEventFeedsRequest) ProtoMessage() {}
+
+func (x *ListEventFeedsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListEventFeedsRequest.ProtoReflect.Descriptor instead.
+func (*ListEventFeedsRequest) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListEventFeedsRequest) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+type ListEventFeedsResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ List []*EventFeedMetadata `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListEventFeedsResponse) Reset() {
+ *x = ListEventFeedsResponse{}
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListEventFeedsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListEventFeedsResponse) ProtoMessage() {}
+
+func (x *ListEventFeedsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListEventFeedsResponse.ProtoReflect.Descriptor instead.
+func (*ListEventFeedsResponse) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListEventFeedsResponse) GetList() []*EventFeedMetadata {
+ if x != nil {
+ return x.List
+ }
+ return nil
+}
+
+func (x *ListEventFeedsResponse) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
type Event struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -173,6 +328,7 @@ type Event struct {
// *Event_UsageEvent
// *Event_GrantEvent
// *Event_RevokeEvent
+ // *Event_ResourceChangeEvent
Event isEvent_Event `protobuf_oneof:"event"`
// May contain resources for targets, actor, or items referenced in events
Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"`
@@ -182,7 +338,7 @@ type Event struct {
func (x *Event) Reset() {
*x = Event{}
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -194,7 +350,7 @@ func (x *Event) String() string {
func (*Event) ProtoMessage() {}
func (x *Event) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[2]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -207,7 +363,7 @@ func (x *Event) ProtoReflect() protoreflect.Message {
// Deprecated: Use Event.ProtoReflect.Descriptor instead.
func (*Event) Descriptor() ([]byte, []int) {
- return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{2}
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{4}
}
func (x *Event) GetId() string {
@@ -258,6 +414,15 @@ func (x *Event) GetRevokeEvent() *RevokeEvent {
return nil
}
+func (x *Event) GetResourceChangeEvent() *ResourceChangeEvent {
+ if x != nil {
+ if x, ok := x.Event.(*Event_ResourceChangeEvent); ok {
+ return x.ResourceChangeEvent
+ }
+ }
+ return nil
+}
+
func (x *Event) GetAnnotations() []*anypb.Any {
if x != nil {
return x.Annotations
@@ -281,12 +446,18 @@ type Event_RevokeEvent struct {
RevokeEvent *RevokeEvent `protobuf:"bytes,102,opt,name=revoke_event,json=revokeEvent,proto3,oneof"`
}
+type Event_ResourceChangeEvent struct {
+ ResourceChangeEvent *ResourceChangeEvent `protobuf:"bytes,103,opt,name=resource_change_event,json=resourceChangeEvent,proto3,oneof"`
+}
+
func (*Event_UsageEvent) isEvent_Event() {}
func (*Event_GrantEvent) isEvent_Event() {}
func (*Event_RevokeEvent) isEvent_Event() {}
+func (*Event_ResourceChangeEvent) isEvent_Event() {}
+
type UsageEvent struct {
state protoimpl.MessageState `protogen:"open.v1"`
TargetResource *Resource `protobuf:"bytes,1,opt,name=target_resource,json=targetResource,proto3" json:"target_resource,omitempty"`
@@ -297,7 +468,7 @@ type UsageEvent struct {
func (x *UsageEvent) Reset() {
*x = UsageEvent{}
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -309,7 +480,7 @@ func (x *UsageEvent) String() string {
func (*UsageEvent) ProtoMessage() {}
func (x *UsageEvent) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[3]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -322,7 +493,7 @@ func (x *UsageEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use UsageEvent.ProtoReflect.Descriptor instead.
func (*UsageEvent) Descriptor() ([]byte, []int) {
- return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{3}
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{5}
}
func (x *UsageEvent) GetTargetResource() *Resource {
@@ -349,7 +520,7 @@ type GrantEvent struct {
func (x *GrantEvent) Reset() {
*x = GrantEvent{}
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -361,7 +532,7 @@ func (x *GrantEvent) String() string {
func (*GrantEvent) ProtoMessage() {}
func (x *GrantEvent) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[4]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -374,7 +545,7 @@ func (x *GrantEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use GrantEvent.ProtoReflect.Descriptor instead.
func (*GrantEvent) Descriptor() ([]byte, []int) {
- return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{4}
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{6}
}
func (x *GrantEvent) GetGrant() *Grant {
@@ -394,7 +565,7 @@ type RevokeEvent struct {
func (x *RevokeEvent) Reset() {
*x = RevokeEvent{}
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -406,7 +577,7 @@ func (x *RevokeEvent) String() string {
func (*RevokeEvent) ProtoMessage() {}
func (x *RevokeEvent) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_event_feed_proto_msgTypes[5]
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -419,7 +590,7 @@ func (x *RevokeEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use RevokeEvent.ProtoReflect.Descriptor instead.
func (*RevokeEvent) Descriptor() ([]byte, []int) {
- return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{5}
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{7}
}
func (x *RevokeEvent) GetEntitlement() *Entitlement {
@@ -436,6 +607,112 @@ func (x *RevokeEvent) GetPrincipal() *Resource {
return nil
}
+// generic lightweight event indicating a resource was changed
+type ResourceChangeEvent struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+ ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ResourceChangeEvent) Reset() {
+ *x = ResourceChangeEvent{}
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourceChangeEvent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceChangeEvent) ProtoMessage() {}
+
+func (x *ResourceChangeEvent) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceChangeEvent.ProtoReflect.Descriptor instead.
+func (*ResourceChangeEvent) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ResourceChangeEvent) GetResourceId() *ResourceId {
+ if x != nil {
+ return x.ResourceId
+ }
+ return nil
+}
+
+func (x *ResourceChangeEvent) GetParentResourceId() *ResourceId {
+ if x != nil {
+ return x.ParentResourceId
+ }
+ return nil
+}
+
+type EventFeedMetadata struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // unique identifier for the event feed
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ SupportedEventTypes []EventType `protobuf:"varint,2,rep,packed,name=supported_event_types,json=supportedEventTypes,proto3,enum=c1.connector.v2.EventType" json:"supported_event_types,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EventFeedMetadata) Reset() {
+ *x = EventFeedMetadata{}
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EventFeedMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EventFeedMetadata) ProtoMessage() {}
+
+func (x *EventFeedMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_event_feed_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EventFeedMetadata.ProtoReflect.Descriptor instead.
+func (*EventFeedMetadata) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_event_feed_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *EventFeedMetadata) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *EventFeedMetadata) GetSupportedEventTypes() []EventType {
+ if x != nil {
+ return x.SupportedEventTypes
+ }
+ return nil
+}
+
var File_c1_connector_v2_event_feed_proto protoreflect.FileDescriptor
var file_c1_connector_v2_event_feed_proto_rawDesc = string([]byte{
@@ -453,7 +730,7 @@ var file_c1_connector_v2_event_feed_proto_rawDesc = string([]byte{
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
- 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x02, 0x0a, 0x11, 0x4c, 0x69, 0x73,
0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25,
0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d,
0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63,
@@ -466,73 +743,129 @@ var file_c1_connector_v2_event_feed_proto_rawDesc = string([]byte{
0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
- 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xbe, 0x01,
- 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76,
- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x20,
- 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x68,
- 0x61, 0x73, 0x5f, 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68,
- 0x61, 0x73, 0x4d, 0x6f, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
- 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd8,
- 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75,
- 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x61,
- 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x65,
- 0x76, 0x65, 0x6e, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61,
- 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x0c, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x5f,
- 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31,
+ 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x31, 0x0a,
+ 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x08,
+ 0xd0, 0x01, 0x01, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x49, 0x64,
+ 0x22, 0xbe, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52,
+ 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01,
+ 0x28, 0x80, 0x20, 0xd0, 0x01, 0x01, 0x52, 0x06, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x19,
+ 0x0a, 0x08, 0x68, 0x61, 0x73, 0x5f, 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x07, 0x68, 0x61, 0x73, 0x4d, 0x6f, 0x72, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0x4f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65,
+ 0x65, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x46, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a,
+ 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+ 0x04, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
+ 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb4, 0x03,
+ 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72,
+ 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72,
+ 0x65, 0x64, 0x41, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x75, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x73, 0x61, 0x67,
+ 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x75, 0x73, 0x61, 0x67, 0x65, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x5f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e,
+ 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x0c, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x5f, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x76,
+ 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x76, 0x6f,
+ 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x5a, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x13,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x22, 0x92, 0x01, 0x0a, 0x0a, 0x55, 0x73, 0x61, 0x67, 0x65, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63,
+ 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72,
+ 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x3a, 0x0a, 0x0a, 0x47, 0x72, 0x61,
+ 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52, 0x05,
+ 0x67, 0x72, 0x61, 0x6e, 0x74, 0x22, 0x86, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70,
+ 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x22, 0xa8,
+ 0x01, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67,
+ 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x46, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31,
0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65,
- 0x76, 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x76,
- 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x92, 0x01, 0x0a, 0x0a, 0x55, 0x73,
- 0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0e, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0e,
- 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
- 0x0d, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x3a,
- 0x0a, 0x0a, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x05,
- 0x67, 0x72, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31,
- 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72,
- 0x61, 0x6e, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x22, 0x86, 0x01, 0x0a, 0x0b, 0x52,
- 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76,
- 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72,
- 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e,
- 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69,
- 0x70, 0x61, 0x6c, 0x32, 0x65, 0x0a, 0x0c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x12, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69,
- 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74,
- 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f,
- 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
- 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x49,
+ 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x22, 0x90, 0x01, 0x0a, 0x11, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x1a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07,
+ 0x72, 0x05, 0x20, 0x01, 0x28, 0x80, 0x08, 0x52, 0x02, 0x69, 0x64, 0x12, 0x5f, 0x0a, 0x15, 0x73,
+ 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x92, 0x01, 0x09, 0x18, 0x01,
+ 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
+ 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x2a, 0x69, 0x0a, 0x09,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45,
+ 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x53, 0x41, 0x47, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x45,
+ 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x04, 0x22, 0x04, 0x08, 0x02, 0x10,
+ 0x02, 0x22, 0x04, 0x08, 0x03, 0x10, 0x03, 0x32, 0xc8, 0x01, 0x0a, 0x0c, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x61, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64,
+ 0x73, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65,
+ 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61,
+ 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
})
var (
@@ -547,42 +880,58 @@ func file_c1_connector_v2_event_feed_proto_rawDescGZIP() []byte {
return file_c1_connector_v2_event_feed_proto_rawDescData
}
-var file_c1_connector_v2_event_feed_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_c1_connector_v2_event_feed_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_c1_connector_v2_event_feed_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_c1_connector_v2_event_feed_proto_goTypes = []any{
- (*ListEventsRequest)(nil), // 0: c1.connector.v2.ListEventsRequest
- (*ListEventsResponse)(nil), // 1: c1.connector.v2.ListEventsResponse
- (*Event)(nil), // 2: c1.connector.v2.Event
- (*UsageEvent)(nil), // 3: c1.connector.v2.UsageEvent
- (*GrantEvent)(nil), // 4: c1.connector.v2.GrantEvent
- (*RevokeEvent)(nil), // 5: c1.connector.v2.RevokeEvent
- (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
- (*anypb.Any)(nil), // 7: google.protobuf.Any
- (*Resource)(nil), // 8: c1.connector.v2.Resource
- (*Grant)(nil), // 9: c1.connector.v2.Grant
- (*Entitlement)(nil), // 10: c1.connector.v2.Entitlement
+ (EventType)(0), // 0: c1.connector.v2.EventType
+ (*ListEventsRequest)(nil), // 1: c1.connector.v2.ListEventsRequest
+ (*ListEventsResponse)(nil), // 2: c1.connector.v2.ListEventsResponse
+ (*ListEventFeedsRequest)(nil), // 3: c1.connector.v2.ListEventFeedsRequest
+ (*ListEventFeedsResponse)(nil), // 4: c1.connector.v2.ListEventFeedsResponse
+ (*Event)(nil), // 5: c1.connector.v2.Event
+ (*UsageEvent)(nil), // 6: c1.connector.v2.UsageEvent
+ (*GrantEvent)(nil), // 7: c1.connector.v2.GrantEvent
+ (*RevokeEvent)(nil), // 8: c1.connector.v2.RevokeEvent
+ (*ResourceChangeEvent)(nil), // 9: c1.connector.v2.ResourceChangeEvent
+ (*EventFeedMetadata)(nil), // 10: c1.connector.v2.EventFeedMetadata
+ (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+ (*anypb.Any)(nil), // 12: google.protobuf.Any
+ (*Resource)(nil), // 13: c1.connector.v2.Resource
+ (*Grant)(nil), // 14: c1.connector.v2.Grant
+ (*Entitlement)(nil), // 15: c1.connector.v2.Entitlement
+ (*ResourceId)(nil), // 16: c1.connector.v2.ResourceId
}
var file_c1_connector_v2_event_feed_proto_depIdxs = []int32{
- 6, // 0: c1.connector.v2.ListEventsRequest.start_at:type_name -> google.protobuf.Timestamp
- 7, // 1: c1.connector.v2.ListEventsRequest.annotations:type_name -> google.protobuf.Any
- 2, // 2: c1.connector.v2.ListEventsResponse.events:type_name -> c1.connector.v2.Event
- 7, // 3: c1.connector.v2.ListEventsResponse.annotations:type_name -> google.protobuf.Any
- 6, // 4: c1.connector.v2.Event.occurred_at:type_name -> google.protobuf.Timestamp
- 3, // 5: c1.connector.v2.Event.usage_event:type_name -> c1.connector.v2.UsageEvent
- 4, // 6: c1.connector.v2.Event.grant_event:type_name -> c1.connector.v2.GrantEvent
- 5, // 7: c1.connector.v2.Event.revoke_event:type_name -> c1.connector.v2.RevokeEvent
- 7, // 8: c1.connector.v2.Event.annotations:type_name -> google.protobuf.Any
- 8, // 9: c1.connector.v2.UsageEvent.target_resource:type_name -> c1.connector.v2.Resource
- 8, // 10: c1.connector.v2.UsageEvent.actor_resource:type_name -> c1.connector.v2.Resource
- 9, // 11: c1.connector.v2.GrantEvent.grant:type_name -> c1.connector.v2.Grant
- 10, // 12: c1.connector.v2.RevokeEvent.entitlement:type_name -> c1.connector.v2.Entitlement
- 8, // 13: c1.connector.v2.RevokeEvent.principal:type_name -> c1.connector.v2.Resource
- 0, // 14: c1.connector.v2.EventService.ListEvents:input_type -> c1.connector.v2.ListEventsRequest
- 1, // 15: c1.connector.v2.EventService.ListEvents:output_type -> c1.connector.v2.ListEventsResponse
- 15, // [15:16] is the sub-list for method output_type
- 14, // [14:15] is the sub-list for method input_type
- 14, // [14:14] is the sub-list for extension type_name
- 14, // [14:14] is the sub-list for extension extendee
- 0, // [0:14] is the sub-list for field type_name
+ 11, // 0: c1.connector.v2.ListEventsRequest.start_at:type_name -> google.protobuf.Timestamp
+ 12, // 1: c1.connector.v2.ListEventsRequest.annotations:type_name -> google.protobuf.Any
+ 5, // 2: c1.connector.v2.ListEventsResponse.events:type_name -> c1.connector.v2.Event
+ 12, // 3: c1.connector.v2.ListEventsResponse.annotations:type_name -> google.protobuf.Any
+ 12, // 4: c1.connector.v2.ListEventFeedsRequest.annotations:type_name -> google.protobuf.Any
+ 10, // 5: c1.connector.v2.ListEventFeedsResponse.list:type_name -> c1.connector.v2.EventFeedMetadata
+ 12, // 6: c1.connector.v2.ListEventFeedsResponse.annotations:type_name -> google.protobuf.Any
+ 11, // 7: c1.connector.v2.Event.occurred_at:type_name -> google.protobuf.Timestamp
+ 6, // 8: c1.connector.v2.Event.usage_event:type_name -> c1.connector.v2.UsageEvent
+ 7, // 9: c1.connector.v2.Event.grant_event:type_name -> c1.connector.v2.GrantEvent
+ 8, // 10: c1.connector.v2.Event.revoke_event:type_name -> c1.connector.v2.RevokeEvent
+ 9, // 11: c1.connector.v2.Event.resource_change_event:type_name -> c1.connector.v2.ResourceChangeEvent
+ 12, // 12: c1.connector.v2.Event.annotations:type_name -> google.protobuf.Any
+ 13, // 13: c1.connector.v2.UsageEvent.target_resource:type_name -> c1.connector.v2.Resource
+ 13, // 14: c1.connector.v2.UsageEvent.actor_resource:type_name -> c1.connector.v2.Resource
+ 14, // 15: c1.connector.v2.GrantEvent.grant:type_name -> c1.connector.v2.Grant
+ 15, // 16: c1.connector.v2.RevokeEvent.entitlement:type_name -> c1.connector.v2.Entitlement
+ 13, // 17: c1.connector.v2.RevokeEvent.principal:type_name -> c1.connector.v2.Resource
+ 16, // 18: c1.connector.v2.ResourceChangeEvent.resource_id:type_name -> c1.connector.v2.ResourceId
+ 16, // 19: c1.connector.v2.ResourceChangeEvent.parent_resource_id:type_name -> c1.connector.v2.ResourceId
+ 0, // 20: c1.connector.v2.EventFeedMetadata.supported_event_types:type_name -> c1.connector.v2.EventType
+ 1, // 21: c1.connector.v2.EventService.ListEvents:input_type -> c1.connector.v2.ListEventsRequest
+ 3, // 22: c1.connector.v2.EventService.ListEventFeeds:input_type -> c1.connector.v2.ListEventFeedsRequest
+ 2, // 23: c1.connector.v2.EventService.ListEvents:output_type -> c1.connector.v2.ListEventsResponse
+ 4, // 24: c1.connector.v2.EventService.ListEventFeeds:output_type -> c1.connector.v2.ListEventFeedsResponse
+ 23, // [23:25] is the sub-list for method output_type
+ 21, // [21:23] is the sub-list for method input_type
+ 21, // [21:21] is the sub-list for extension type_name
+ 21, // [21:21] is the sub-list for extension extendee
+ 0, // [0:21] is the sub-list for field type_name
}
func init() { file_c1_connector_v2_event_feed_proto_init() }
@@ -593,23 +942,25 @@ func file_c1_connector_v2_event_feed_proto_init() {
file_c1_connector_v2_entitlement_proto_init()
file_c1_connector_v2_grant_proto_init()
file_c1_connector_v2_resource_proto_init()
- file_c1_connector_v2_event_feed_proto_msgTypes[2].OneofWrappers = []any{
+ file_c1_connector_v2_event_feed_proto_msgTypes[4].OneofWrappers = []any{
(*Event_UsageEvent)(nil),
(*Event_GrantEvent)(nil),
(*Event_RevokeEvent)(nil),
+ (*Event_ResourceChangeEvent)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_event_feed_proto_rawDesc), len(file_c1_connector_v2_event_feed_proto_rawDesc)),
- NumEnums: 0,
- NumMessages: 6,
+ NumEnums: 1,
+ NumMessages: 10,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_c1_connector_v2_event_feed_proto_goTypes,
DependencyIndexes: file_c1_connector_v2_event_feed_proto_depIdxs,
+ EnumInfos: file_c1_connector_v2_event_feed_proto_enumTypes,
MessageInfos: file_c1_connector_v2_event_feed_proto_msgTypes,
}.Build()
File_c1_connector_v2_event_feed_proto = out.File
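A minimal sketch (not part of the generated code or this diff) of how a connector built against this SDK revision might populate the new resource_change_event oneof variant on Event. The ResourceId field names (ResourceType, Resource) are assumptions based on the existing c1.connector.v2.ResourceId message, not something introduced by this change.

package example

import (
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// newResourceChangeEvent wraps a resource identifier in the
// Event_ResourceChangeEvent oneof variant added by this SDK update.
func newResourceChangeEvent(resourceType, resourceID string) *v2.Event {
	return &v2.Event{
		Id:         "evt-example", // placeholder, connector-assigned event id
		OccurredAt: timestamppb.Now(),
		Event: &v2.Event_ResourceChangeEvent{
			ResourceChangeEvent: &v2.ResourceChangeEvent{
				ResourceId: &v2.ResourceId{
					ResourceType: resourceType, // assumed ResourceId field name
					Resource:     resourceID,   // assumed ResourceId field name
				},
			},
		},
	}
}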
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.validate.go
index 6bc19599..bc384bf6 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.validate.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed.pb.validate.go
@@ -150,6 +150,21 @@ func (m *ListEventsRequest) validate(all bool) error {
}
+ if m.GetEventFeedId() != "" {
+
+ if l := len(m.GetEventFeedId()); l < 1 || l > 1024 {
+ err := ListEventsRequestValidationError{
+ field: "EventFeedId",
+ reason: "value length must be between 1 and 1024 bytes, inclusive",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
if len(errors) > 0 {
return ListEventsRequestMultiError(errors)
}
@@ -417,6 +432,312 @@ var _ interface {
ErrorName() string
} = ListEventsResponseValidationError{}
+// Validate checks the field values on ListEventFeedsRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no
+// violations.
+func (m *ListEventFeedsRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListEventFeedsRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListEventFeedsRequestMultiError, or nil if none found.
+func (m *ListEventFeedsRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListEventFeedsRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListEventFeedsRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListEventFeedsRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListEventFeedsRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListEventFeedsRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListEventFeedsRequestMultiError is an error wrapping multiple validation
+// errors returned by ListEventFeedsRequest.ValidateAll() if the designated
+// constraints aren't met.
+type ListEventFeedsRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListEventFeedsRequestMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListEventFeedsRequestMultiError) AllErrors() []error { return m }
+
+// ListEventFeedsRequestValidationError is the validation error returned by
+// ListEventFeedsRequest.Validate if the designated constraints aren't met.
+type ListEventFeedsRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListEventFeedsRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListEventFeedsRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListEventFeedsRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListEventFeedsRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListEventFeedsRequestValidationError) ErrorName() string {
+ return "ListEventFeedsRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListEventFeedsRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListEventFeedsRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListEventFeedsRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListEventFeedsRequestValidationError{}
+
+// Validate checks the field values on ListEventFeedsResponse with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no
+// violations.
+func (m *ListEventFeedsResponse) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListEventFeedsResponse with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListEventFeedsResponseMultiError, or nil if none found.
+func (m *ListEventFeedsResponse) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListEventFeedsResponse) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetList() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("List[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("List[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("List[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListEventFeedsResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListEventFeedsResponseMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListEventFeedsResponseMultiError is an error wrapping multiple validation
+// errors returned by ListEventFeedsResponse.ValidateAll() if the designated
+// constraints aren't met.
+type ListEventFeedsResponseMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListEventFeedsResponseMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListEventFeedsResponseMultiError) AllErrors() []error { return m }
+
+// ListEventFeedsResponseValidationError is the validation error returned by
+// ListEventFeedsResponse.Validate if the designated constraints aren't met.
+type ListEventFeedsResponseValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListEventFeedsResponseValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListEventFeedsResponseValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListEventFeedsResponseValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListEventFeedsResponseValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListEventFeedsResponseValidationError) ErrorName() string {
+ return "ListEventFeedsResponseValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListEventFeedsResponseValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListEventFeedsResponse.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListEventFeedsResponseValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListEventFeedsResponseValidationError{}
+
// Validate checks the field values on Event with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
@@ -627,6 +948,47 @@ func (m *Event) validate(all bool) error {
}
}
+ case *Event_ResourceChangeEvent:
+ if v == nil {
+ err := EventValidationError{
+ field: "Event",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetResourceChangeEvent()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EventValidationError{
+ field: "ResourceChangeEvent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EventValidationError{
+ field: "ResourceChangeEvent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResourceChangeEvent()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EventValidationError{
+ field: "ResourceChangeEvent",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
default:
_ = v // ensures v is used
}
@@ -1149,3 +1511,318 @@ var _ interface {
Cause() error
ErrorName() string
} = RevokeEventValidationError{}
+
+// Validate checks the field values on ResourceChangeEvent with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no
+// violations.
+func (m *ResourceChangeEvent) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceChangeEvent with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceChangeEventMultiError, or nil if none found.
+func (m *ResourceChangeEvent) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceChangeEvent) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetResourceId() == nil {
+ err := ResourceChangeEventValidationError{
+ field: "ResourceId",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetResourceId()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceChangeEventValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceChangeEventValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResourceId()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceChangeEventValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetParentResourceId()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceChangeEventValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceChangeEventValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParentResourceId()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceChangeEventValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ResourceChangeEventMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceChangeEventMultiError is an error wrapping multiple validation
+// errors returned by ResourceChangeEvent.ValidateAll() if the designated
+// constraints aren't met.
+type ResourceChangeEventMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceChangeEventMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceChangeEventMultiError) AllErrors() []error { return m }
+
+// ResourceChangeEventValidationError is the validation error returned by
+// ResourceChangeEvent.Validate if the designated constraints aren't met.
+type ResourceChangeEventValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceChangeEventValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceChangeEventValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceChangeEventValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceChangeEventValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceChangeEventValidationError) ErrorName() string {
+ return "ResourceChangeEventValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceChangeEventValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceChangeEvent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceChangeEventValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceChangeEventValidationError{}
+
+// Validate checks the field values on EventFeedMetadata with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *EventFeedMetadata) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EventFeedMetadata with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// EventFeedMetadataMultiError, or nil if none found.
+func (m *EventFeedMetadata) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EventFeedMetadata) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if l := len(m.GetId()); l < 1 || l > 1024 {
+ err := EventFeedMetadataValidationError{
+ field: "Id",
+ reason: "value length must be between 1 and 1024 bytes, inclusive",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ _EventFeedMetadata_SupportedEventTypes_Unique := make(map[EventType]struct{}, len(m.GetSupportedEventTypes()))
+
+ for idx, item := range m.GetSupportedEventTypes() {
+ _, _ = idx, item
+
+ if _, exists := _EventFeedMetadata_SupportedEventTypes_Unique[item]; exists {
+ err := EventFeedMetadataValidationError{
+ field: fmt.Sprintf("SupportedEventTypes[%v]", idx),
+ reason: "repeated value must contain unique items",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+ _EventFeedMetadata_SupportedEventTypes_Unique[item] = struct{}{}
+ }
+
+ if _, ok := EventType_name[int32(item)]; !ok {
+ err := EventFeedMetadataValidationError{
+ field: fmt.Sprintf("SupportedEventTypes[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return EventFeedMetadataMultiError(errors)
+ }
+
+ return nil
+}
+
+// EventFeedMetadataMultiError is an error wrapping multiple validation errors
+// returned by EventFeedMetadata.ValidateAll() if the designated constraints
+// aren't met.
+type EventFeedMetadataMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EventFeedMetadataMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EventFeedMetadataMultiError) AllErrors() []error { return m }
+
+// EventFeedMetadataValidationError is the validation error returned by
+// EventFeedMetadata.Validate if the designated constraints aren't met.
+type EventFeedMetadataValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EventFeedMetadataValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EventFeedMetadataValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EventFeedMetadataValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EventFeedMetadataValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EventFeedMetadataValidationError) ErrorName() string {
+ return "EventFeedMetadataValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EventFeedMetadataValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEventFeedMetadata.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EventFeedMetadataValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EventFeedMetadataValidationError{}
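A small usage sketch (illustrative only, not part of this change) for the new validation code above: ValidateAll on EventFeedMetadata collects every rule violation into an EventFeedMetadataMultiError, while Validate stops at the first one.

package example

import (
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// checkFeedMetadata reports every validation violation on a feed descriptor.
func checkFeedMetadata(md *v2.EventFeedMetadata) error {
	err := md.ValidateAll()
	if err == nil {
		return nil
	}
	// ValidateAll returns an EventFeedMetadataMultiError wrapping all violations.
	if multi, ok := err.(v2.EventFeedMetadataMultiError); ok {
		for _, verr := range multi.AllErrors() {
			fmt.Println("event feed metadata violation:", verr)
		}
	}
	return err
}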
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_grpc.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_grpc.pb.go
index e82c8c80..1d5b11f2 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_grpc.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/event_feed_grpc.pb.go
@@ -19,7 +19,8 @@ import (
const _ = grpc.SupportPackageIsVersion9
const (
- EventService_ListEvents_FullMethodName = "/c1.connector.v2.EventService/ListEvents"
+ EventService_ListEvents_FullMethodName = "/c1.connector.v2.EventService/ListEvents"
+ EventService_ListEventFeeds_FullMethodName = "/c1.connector.v2.EventService/ListEventFeeds"
)
// EventServiceClient is the client API for EventService service.
@@ -27,6 +28,7 @@ const (
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type EventServiceClient interface {
ListEvents(ctx context.Context, in *ListEventsRequest, opts ...grpc.CallOption) (*ListEventsResponse, error)
+ ListEventFeeds(ctx context.Context, in *ListEventFeedsRequest, opts ...grpc.CallOption) (*ListEventFeedsResponse, error)
}
type eventServiceClient struct {
@@ -47,11 +49,22 @@ func (c *eventServiceClient) ListEvents(ctx context.Context, in *ListEventsReque
return out, nil
}
+func (c *eventServiceClient) ListEventFeeds(ctx context.Context, in *ListEventFeedsRequest, opts ...grpc.CallOption) (*ListEventFeedsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(ListEventFeedsResponse)
+ err := c.cc.Invoke(ctx, EventService_ListEventFeeds_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// EventServiceServer is the server API for EventService service.
// All implementations should embed UnimplementedEventServiceServer
// for forward compatibility.
type EventServiceServer interface {
ListEvents(context.Context, *ListEventsRequest) (*ListEventsResponse, error)
+ ListEventFeeds(context.Context, *ListEventFeedsRequest) (*ListEventFeedsResponse, error)
}
// UnimplementedEventServiceServer should be embedded to have
@@ -64,6 +77,9 @@ type UnimplementedEventServiceServer struct{}
func (UnimplementedEventServiceServer) ListEvents(context.Context, *ListEventsRequest) (*ListEventsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListEvents not implemented")
}
+func (UnimplementedEventServiceServer) ListEventFeeds(context.Context, *ListEventFeedsRequest) (*ListEventFeedsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListEventFeeds not implemented")
+}
func (UnimplementedEventServiceServer) testEmbeddedByValue() {}
// UnsafeEventServiceServer may be embedded to opt out of forward compatibility for this service.
@@ -102,6 +118,24 @@ func _EventService_ListEvents_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
+func _EventService_ListEventFeeds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListEventFeedsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(EventServiceServer).ListEventFeeds(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: EventService_ListEventFeeds_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(EventServiceServer).ListEventFeeds(ctx, req.(*ListEventFeedsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// EventService_ServiceDesc is the grpc.ServiceDesc for EventService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -113,6 +147,10 @@ var EventService_ServiceDesc = grpc.ServiceDesc{
MethodName: "ListEvents",
Handler: _EventService_ListEvents_Handler,
},
+ {
+ MethodName: "ListEventFeeds",
+ Handler: _EventService_ListEventFeeds_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "c1/connector/v2/event_feed.proto",
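An illustrative server-side sketch (not part of this diff) of the new EventService.ListEventFeeds RPC; the feed id and the advertised event types below are placeholders.

package example

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// eventFeedServer embeds UnimplementedEventServiceServer so any RPC it does
// not override returns codes.Unimplemented, and implements the new
// ListEventFeeds method.
type eventFeedServer struct {
	v2.UnimplementedEventServiceServer
}

func (s *eventFeedServer) ListEventFeeds(ctx context.Context, _ *v2.ListEventFeedsRequest) (*v2.ListEventFeedsResponse, error) {
	return &v2.ListEventFeedsResponse{
		List: []*v2.EventFeedMetadata{
			{
				Id: "default", // placeholder feed id
				SupportedEventTypes: []v2.EventType{
					v2.EventType_EVENT_TYPE_RESOURCE_CHANGE,
				},
			},
		},
	}, nil
}

Registering this implementation through the generated RegisterEventServiceServer (or EventService_ServiceDesc above) wires up both ListEvents, which falls back to Unimplemented here, and the new ListEventFeeds handler.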
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go
index 1639a155..ad9f2313 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.go
@@ -1610,6 +1610,118 @@ func (x *ResourcesServiceListResourcesResponse) GetAnnotations() []*anypb.Any {
return nil
}
+type ResourceGetterServiceGetResourceRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ResourceId *ResourceId `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+ ParentResourceId *ResourceId `protobuf:"bytes,2,opt,name=parent_resource_id,json=parentResourceId,proto3" json:"parent_resource_id,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ResourceGetterServiceGetResourceRequest) Reset() {
+ *x = ResourceGetterServiceGetResourceRequest{}
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourceGetterServiceGetResourceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceGetterServiceGetResourceRequest) ProtoMessage() {}
+
+func (x *ResourceGetterServiceGetResourceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceGetterServiceGetResourceRequest.ProtoReflect.Descriptor instead.
+func (*ResourceGetterServiceGetResourceRequest) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *ResourceGetterServiceGetResourceRequest) GetResourceId() *ResourceId {
+ if x != nil {
+ return x.ResourceId
+ }
+ return nil
+}
+
+func (x *ResourceGetterServiceGetResourceRequest) GetParentResourceId() *ResourceId {
+ if x != nil {
+ return x.ParentResourceId
+ }
+ return nil
+}
+
+func (x *ResourceGetterServiceGetResourceRequest) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+type ResourceGetterServiceGetResourceResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ResourceGetterServiceGetResourceResponse) Reset() {
+ *x = ResourceGetterServiceGetResourceResponse{}
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourceGetterServiceGetResourceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceGetterServiceGetResourceResponse) ProtoMessage() {}
+
+func (x *ResourceGetterServiceGetResourceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[23]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceGetterServiceGetResourceResponse.ProtoReflect.Descriptor instead.
+func (*ResourceGetterServiceGetResourceResponse) Descriptor() ([]byte, []int) {
+ return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *ResourceGetterServiceGetResourceResponse) GetResource() *Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *ResourceGetterServiceGetResourceResponse) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
type ExternalId struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
@@ -1621,7 +1733,7 @@ type ExternalId struct {
func (x *ExternalId) Reset() {
*x = ExternalId{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[22]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1633,7 +1745,7 @@ func (x *ExternalId) String() string {
func (*ExternalId) ProtoMessage() {}
func (x *ExternalId) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[22]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[24]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1646,7 +1758,7 @@ func (x *ExternalId) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExternalId.ProtoReflect.Descriptor instead.
func (*ExternalId) Descriptor() ([]byte, []int) {
- return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{22}
+ return file_c1_connector_v2_resource_proto_rawDescGZIP(), []int{24}
}
func (x *ExternalId) GetId() string {
@@ -1681,7 +1793,7 @@ type AccountInfo_Email struct {
func (x *AccountInfo_Email) Reset() {
*x = AccountInfo_Email{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[23]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1693,7 +1805,7 @@ func (x *AccountInfo_Email) String() string {
func (*AccountInfo_Email) ProtoMessage() {}
func (x *AccountInfo_Email) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[23]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[25]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1732,7 +1844,7 @@ type CredentialOptions_RandomPassword struct {
func (x *CredentialOptions_RandomPassword) Reset() {
*x = CredentialOptions_RandomPassword{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[24]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1744,7 +1856,7 @@ func (x *CredentialOptions_RandomPassword) String() string {
func (*CredentialOptions_RandomPassword) ProtoMessage() {}
func (x *CredentialOptions_RandomPassword) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[24]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[26]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1775,7 +1887,7 @@ type CredentialOptions_NoPassword struct {
func (x *CredentialOptions_NoPassword) Reset() {
*x = CredentialOptions_NoPassword{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[25]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1787,7 +1899,7 @@ func (x *CredentialOptions_NoPassword) String() string {
func (*CredentialOptions_NoPassword) ProtoMessage() {}
func (x *CredentialOptions_NoPassword) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[25]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[27]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1812,7 +1924,7 @@ type CredentialOptions_SSO struct {
func (x *CredentialOptions_SSO) Reset() {
*x = CredentialOptions_SSO{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[26]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1824,7 +1936,7 @@ func (x *CredentialOptions_SSO) String() string {
func (*CredentialOptions_SSO) ProtoMessage() {}
func (x *CredentialOptions_SSO) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[26]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[28]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1857,7 +1969,7 @@ type CreateAccountResponse_SuccessResult struct {
func (x *CreateAccountResponse_SuccessResult) Reset() {
*x = CreateAccountResponse_SuccessResult{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[27]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1869,7 +1981,7 @@ func (x *CreateAccountResponse_SuccessResult) String() string {
func (*CreateAccountResponse_SuccessResult) ProtoMessage() {}
func (x *CreateAccountResponse_SuccessResult) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[27]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[29]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1910,7 +2022,7 @@ type CreateAccountResponse_ActionRequiredResult struct {
func (x *CreateAccountResponse_ActionRequiredResult) Reset() {
*x = CreateAccountResponse_ActionRequiredResult{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[28]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1922,7 +2034,7 @@ func (x *CreateAccountResponse_ActionRequiredResult) String() string {
func (*CreateAccountResponse_ActionRequiredResult) ProtoMessage() {}
func (x *CreateAccountResponse_ActionRequiredResult) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[28]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[30]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1968,7 +2080,7 @@ type EncryptionConfig_JWKPublicKeyConfig struct {
func (x *EncryptionConfig_JWKPublicKeyConfig) Reset() {
*x = EncryptionConfig_JWKPublicKeyConfig{}
- mi := &file_c1_connector_v2_resource_proto_msgTypes[29]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1980,7 +2092,7 @@ func (x *EncryptionConfig_JWKPublicKeyConfig) String() string {
func (*EncryptionConfig_JWKPublicKeyConfig) ProtoMessage() {}
func (x *EncryptionConfig_JWKPublicKeyConfig) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connector_v2_resource_proto_msgTypes[29]
+ mi := &file_c1_connector_v2_resource_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2350,75 +2462,110 @@ var file_c1_connector_v2_resource_proto_rawDesc = string([]byte{
0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x52, 0x0a, 0x0a, 0x45,
- 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e,
- 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a,
- 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x32,
- 0xab, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
- 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x92, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73,
- 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x3d,
- 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32,
- 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, 0x27,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63,
+ 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x00, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x28, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47,
+ 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
+ 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
+ 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x52,
+ 0x0a, 0x0a, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04,
+ 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x32, 0xab, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x92, 0x01, 0x0a, 0x11,
+ 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
+ 0x73, 0x12, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
+ 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x3e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x32, 0x92, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e,
0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e,
- 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x92, 0x01,
- 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x12, 0x7e, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
- 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73,
- 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x32, 0xde, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x61, 0x0a,
- 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
- 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76,
- 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x61, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x32, 0x81, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67,
- 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x56, 0x32, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x9c, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
+ 0x82, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x38, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x74, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xde, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
+ 0x61, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
+ 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x61, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63,
0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x32, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x83, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x10, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72,
- 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
- 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74,
- 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x77, 0x0a,
- 0x15, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x81, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x67, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x56, 0x32, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29,
0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32,
- 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e,
- 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63,
- 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56,
+ 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x83, 0x01, 0x0a, 0x18, 0x43, 0x72,
+ 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x10, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65,
+ 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x28, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x74,
+ 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32,
+ 0x77, 0x0a, 0x15, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x26, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72,
+ 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62,
+ 0x2f, 0x63, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x76, 0x32,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
@@ -2434,7 +2581,7 @@ func file_c1_connector_v2_resource_proto_rawDescGZIP() []byte {
}
var file_c1_connector_v2_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_c1_connector_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 30)
+var file_c1_connector_v2_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
var file_c1_connector_v2_resource_proto_goTypes = []any{
(ResourceType_Trait)(0), // 0: c1.connector.v2.ResourceType.Trait
(Resource_CreationSource)(0), // 1: c1.connector.v2.Resource.CreationSource
@@ -2460,81 +2607,90 @@ var file_c1_connector_v2_resource_proto_goTypes = []any{
(*Resource)(nil), // 21: c1.connector.v2.Resource
(*ResourcesServiceListResourcesRequest)(nil), // 22: c1.connector.v2.ResourcesServiceListResourcesRequest
(*ResourcesServiceListResourcesResponse)(nil), // 23: c1.connector.v2.ResourcesServiceListResourcesResponse
- (*ExternalId)(nil), // 24: c1.connector.v2.ExternalId
- (*AccountInfo_Email)(nil), // 25: c1.connector.v2.AccountInfo.Email
- (*CredentialOptions_RandomPassword)(nil), // 26: c1.connector.v2.CredentialOptions.RandomPassword
- (*CredentialOptions_NoPassword)(nil), // 27: c1.connector.v2.CredentialOptions.NoPassword
- (*CredentialOptions_SSO)(nil), // 28: c1.connector.v2.CredentialOptions.SSO
- (*CreateAccountResponse_SuccessResult)(nil), // 29: c1.connector.v2.CreateAccountResponse.SuccessResult
- (*CreateAccountResponse_ActionRequiredResult)(nil), // 30: c1.connector.v2.CreateAccountResponse.ActionRequiredResult
- (*EncryptionConfig_JWKPublicKeyConfig)(nil), // 31: c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig
- (*anypb.Any)(nil), // 32: google.protobuf.Any
- (*structpb.Struct)(nil), // 33: google.protobuf.Struct
+ (*ResourceGetterServiceGetResourceRequest)(nil), // 24: c1.connector.v2.ResourceGetterServiceGetResourceRequest
+ (*ResourceGetterServiceGetResourceResponse)(nil), // 25: c1.connector.v2.ResourceGetterServiceGetResourceResponse
+ (*ExternalId)(nil), // 26: c1.connector.v2.ExternalId
+ (*AccountInfo_Email)(nil), // 27: c1.connector.v2.AccountInfo.Email
+ (*CredentialOptions_RandomPassword)(nil), // 28: c1.connector.v2.CredentialOptions.RandomPassword
+ (*CredentialOptions_NoPassword)(nil), // 29: c1.connector.v2.CredentialOptions.NoPassword
+ (*CredentialOptions_SSO)(nil), // 30: c1.connector.v2.CredentialOptions.SSO
+ (*CreateAccountResponse_SuccessResult)(nil), // 31: c1.connector.v2.CreateAccountResponse.SuccessResult
+ (*CreateAccountResponse_ActionRequiredResult)(nil), // 32: c1.connector.v2.CreateAccountResponse.ActionRequiredResult
+ (*EncryptionConfig_JWKPublicKeyConfig)(nil), // 33: c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig
+ (*anypb.Any)(nil), // 34: google.protobuf.Any
+ (*structpb.Struct)(nil), // 35: google.protobuf.Struct
}
var file_c1_connector_v2_resource_proto_depIdxs = []int32{
0, // 0: c1.connector.v2.ResourceType.traits:type_name -> c1.connector.v2.ResourceType.Trait
- 32, // 1: c1.connector.v2.ResourceType.annotations:type_name -> google.protobuf.Any
+ 34, // 1: c1.connector.v2.ResourceType.annotations:type_name -> google.protobuf.Any
21, // 2: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.parent:type_name -> c1.connector.v2.Resource
- 32, // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.annotations:type_name -> google.protobuf.Any
+ 34, // 3: c1.connector.v2.ResourceTypesServiceListResourceTypesRequest.annotations:type_name -> google.protobuf.Any
2, // 4: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.list:type_name -> c1.connector.v2.ResourceType
- 32, // 5: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.annotations:type_name -> google.protobuf.Any
+ 34, // 5: c1.connector.v2.ResourceTypesServiceListResourceTypesResponse.annotations:type_name -> google.protobuf.Any
21, // 6: c1.connector.v2.CreateResourceRequest.resource:type_name -> c1.connector.v2.Resource
21, // 7: c1.connector.v2.CreateResourceResponse.created:type_name -> c1.connector.v2.Resource
- 32, // 8: c1.connector.v2.CreateResourceResponse.annotations:type_name -> google.protobuf.Any
+ 34, // 8: c1.connector.v2.CreateResourceResponse.annotations:type_name -> google.protobuf.Any
20, // 9: c1.connector.v2.DeleteResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId
- 32, // 10: c1.connector.v2.DeleteResourceResponse.annotations:type_name -> google.protobuf.Any
+ 34, // 10: c1.connector.v2.DeleteResourceResponse.annotations:type_name -> google.protobuf.Any
20, // 11: c1.connector.v2.DeleteResourceV2Request.resource_id:type_name -> c1.connector.v2.ResourceId
- 32, // 12: c1.connector.v2.DeleteResourceV2Response.annotations:type_name -> google.protobuf.Any
+ 34, // 12: c1.connector.v2.DeleteResourceV2Response.annotations:type_name -> google.protobuf.Any
20, // 13: c1.connector.v2.RotateCredentialRequest.resource_id:type_name -> c1.connector.v2.ResourceId
14, // 14: c1.connector.v2.RotateCredentialRequest.credential_options:type_name -> c1.connector.v2.CredentialOptions
19, // 15: c1.connector.v2.RotateCredentialRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
17, // 16: c1.connector.v2.RotateCredentialResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData
20, // 17: c1.connector.v2.RotateCredentialResponse.resource_id:type_name -> c1.connector.v2.ResourceId
- 32, // 18: c1.connector.v2.RotateCredentialResponse.annotations:type_name -> google.protobuf.Any
- 25, // 19: c1.connector.v2.AccountInfo.emails:type_name -> c1.connector.v2.AccountInfo.Email
- 33, // 20: c1.connector.v2.AccountInfo.profile:type_name -> google.protobuf.Struct
- 26, // 21: c1.connector.v2.CredentialOptions.random_password:type_name -> c1.connector.v2.CredentialOptions.RandomPassword
- 27, // 22: c1.connector.v2.CredentialOptions.no_password:type_name -> c1.connector.v2.CredentialOptions.NoPassword
- 28, // 23: c1.connector.v2.CredentialOptions.sso:type_name -> c1.connector.v2.CredentialOptions.SSO
+ 34, // 18: c1.connector.v2.RotateCredentialResponse.annotations:type_name -> google.protobuf.Any
+ 27, // 19: c1.connector.v2.AccountInfo.emails:type_name -> c1.connector.v2.AccountInfo.Email
+ 35, // 20: c1.connector.v2.AccountInfo.profile:type_name -> google.protobuf.Struct
+ 28, // 21: c1.connector.v2.CredentialOptions.random_password:type_name -> c1.connector.v2.CredentialOptions.RandomPassword
+ 29, // 22: c1.connector.v2.CredentialOptions.no_password:type_name -> c1.connector.v2.CredentialOptions.NoPassword
+ 30, // 23: c1.connector.v2.CredentialOptions.sso:type_name -> c1.connector.v2.CredentialOptions.SSO
13, // 24: c1.connector.v2.CreateAccountRequest.account_info:type_name -> c1.connector.v2.AccountInfo
14, // 25: c1.connector.v2.CreateAccountRequest.credential_options:type_name -> c1.connector.v2.CredentialOptions
19, // 26: c1.connector.v2.CreateAccountRequest.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
- 29, // 27: c1.connector.v2.CreateAccountResponse.success:type_name -> c1.connector.v2.CreateAccountResponse.SuccessResult
- 30, // 28: c1.connector.v2.CreateAccountResponse.action_required:type_name -> c1.connector.v2.CreateAccountResponse.ActionRequiredResult
+ 31, // 27: c1.connector.v2.CreateAccountResponse.success:type_name -> c1.connector.v2.CreateAccountResponse.SuccessResult
+ 32, // 28: c1.connector.v2.CreateAccountResponse.action_required:type_name -> c1.connector.v2.CreateAccountResponse.ActionRequiredResult
17, // 29: c1.connector.v2.CreateAccountResponse.encrypted_data:type_name -> c1.connector.v2.EncryptedData
- 32, // 30: c1.connector.v2.CreateAccountResponse.annotations:type_name -> google.protobuf.Any
+ 34, // 30: c1.connector.v2.CreateAccountResponse.annotations:type_name -> google.protobuf.Any
21, // 31: c1.connector.v2.EncryptionConfig.principal:type_name -> c1.connector.v2.Resource
- 31, // 32: c1.connector.v2.EncryptionConfig.jwk_public_key_config:type_name -> c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig
+ 33, // 32: c1.connector.v2.EncryptionConfig.jwk_public_key_config:type_name -> c1.connector.v2.EncryptionConfig.JWKPublicKeyConfig
20, // 33: c1.connector.v2.Resource.id:type_name -> c1.connector.v2.ResourceId
20, // 34: c1.connector.v2.Resource.parent_resource_id:type_name -> c1.connector.v2.ResourceId
- 32, // 35: c1.connector.v2.Resource.annotations:type_name -> google.protobuf.Any
- 24, // 36: c1.connector.v2.Resource.external_id:type_name -> c1.connector.v2.ExternalId
+ 34, // 35: c1.connector.v2.Resource.annotations:type_name -> google.protobuf.Any
+ 26, // 36: c1.connector.v2.Resource.external_id:type_name -> c1.connector.v2.ExternalId
1, // 37: c1.connector.v2.Resource.creation_source:type_name -> c1.connector.v2.Resource.CreationSource
20, // 38: c1.connector.v2.ResourcesServiceListResourcesRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId
- 32, // 39: c1.connector.v2.ResourcesServiceListResourcesRequest.annotations:type_name -> google.protobuf.Any
+ 34, // 39: c1.connector.v2.ResourcesServiceListResourcesRequest.annotations:type_name -> google.protobuf.Any
21, // 40: c1.connector.v2.ResourcesServiceListResourcesResponse.list:type_name -> c1.connector.v2.Resource
- 32, // 41: c1.connector.v2.ResourcesServiceListResourcesResponse.annotations:type_name -> google.protobuf.Any
- 21, // 42: c1.connector.v2.CreateAccountResponse.SuccessResult.resource:type_name -> c1.connector.v2.Resource
- 21, // 43: c1.connector.v2.CreateAccountResponse.ActionRequiredResult.resource:type_name -> c1.connector.v2.Resource
- 3, // 44: c1.connector.v2.ResourceTypesService.ListResourceTypes:input_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesRequest
- 22, // 45: c1.connector.v2.ResourcesService.ListResources:input_type -> c1.connector.v2.ResourcesServiceListResourcesRequest
- 5, // 46: c1.connector.v2.ResourceManagerService.CreateResource:input_type -> c1.connector.v2.CreateResourceRequest
- 7, // 47: c1.connector.v2.ResourceManagerService.DeleteResource:input_type -> c1.connector.v2.DeleteResourceRequest
- 9, // 48: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:input_type -> c1.connector.v2.DeleteResourceV2Request
- 11, // 49: c1.connector.v2.CredentialManagerService.RotateCredential:input_type -> c1.connector.v2.RotateCredentialRequest
- 15, // 50: c1.connector.v2.AccountManagerService.CreateAccount:input_type -> c1.connector.v2.CreateAccountRequest
- 4, // 51: c1.connector.v2.ResourceTypesService.ListResourceTypes:output_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesResponse
- 23, // 52: c1.connector.v2.ResourcesService.ListResources:output_type -> c1.connector.v2.ResourcesServiceListResourcesResponse
- 6, // 53: c1.connector.v2.ResourceManagerService.CreateResource:output_type -> c1.connector.v2.CreateResourceResponse
- 8, // 54: c1.connector.v2.ResourceManagerService.DeleteResource:output_type -> c1.connector.v2.DeleteResourceResponse
- 10, // 55: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:output_type -> c1.connector.v2.DeleteResourceV2Response
- 12, // 56: c1.connector.v2.CredentialManagerService.RotateCredential:output_type -> c1.connector.v2.RotateCredentialResponse
- 16, // 57: c1.connector.v2.AccountManagerService.CreateAccount:output_type -> c1.connector.v2.CreateAccountResponse
- 51, // [51:58] is the sub-list for method output_type
- 44, // [44:51] is the sub-list for method input_type
- 44, // [44:44] is the sub-list for extension type_name
- 44, // [44:44] is the sub-list for extension extendee
- 0, // [0:44] is the sub-list for field type_name
+ 34, // 41: c1.connector.v2.ResourcesServiceListResourcesResponse.annotations:type_name -> google.protobuf.Any
+ 20, // 42: c1.connector.v2.ResourceGetterServiceGetResourceRequest.resource_id:type_name -> c1.connector.v2.ResourceId
+ 20, // 43: c1.connector.v2.ResourceGetterServiceGetResourceRequest.parent_resource_id:type_name -> c1.connector.v2.ResourceId
+ 34, // 44: c1.connector.v2.ResourceGetterServiceGetResourceRequest.annotations:type_name -> google.protobuf.Any
+ 21, // 45: c1.connector.v2.ResourceGetterServiceGetResourceResponse.resource:type_name -> c1.connector.v2.Resource
+ 34, // 46: c1.connector.v2.ResourceGetterServiceGetResourceResponse.annotations:type_name -> google.protobuf.Any
+ 21, // 47: c1.connector.v2.CreateAccountResponse.SuccessResult.resource:type_name -> c1.connector.v2.Resource
+ 21, // 48: c1.connector.v2.CreateAccountResponse.ActionRequiredResult.resource:type_name -> c1.connector.v2.Resource
+ 3, // 49: c1.connector.v2.ResourceTypesService.ListResourceTypes:input_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesRequest
+ 22, // 50: c1.connector.v2.ResourcesService.ListResources:input_type -> c1.connector.v2.ResourcesServiceListResourcesRequest
+ 24, // 51: c1.connector.v2.ResourceGetterService.GetResource:input_type -> c1.connector.v2.ResourceGetterServiceGetResourceRequest
+ 5, // 52: c1.connector.v2.ResourceManagerService.CreateResource:input_type -> c1.connector.v2.CreateResourceRequest
+ 7, // 53: c1.connector.v2.ResourceManagerService.DeleteResource:input_type -> c1.connector.v2.DeleteResourceRequest
+ 9, // 54: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:input_type -> c1.connector.v2.DeleteResourceV2Request
+ 11, // 55: c1.connector.v2.CredentialManagerService.RotateCredential:input_type -> c1.connector.v2.RotateCredentialRequest
+ 15, // 56: c1.connector.v2.AccountManagerService.CreateAccount:input_type -> c1.connector.v2.CreateAccountRequest
+ 4, // 57: c1.connector.v2.ResourceTypesService.ListResourceTypes:output_type -> c1.connector.v2.ResourceTypesServiceListResourceTypesResponse
+ 23, // 58: c1.connector.v2.ResourcesService.ListResources:output_type -> c1.connector.v2.ResourcesServiceListResourcesResponse
+ 25, // 59: c1.connector.v2.ResourceGetterService.GetResource:output_type -> c1.connector.v2.ResourceGetterServiceGetResourceResponse
+ 6, // 60: c1.connector.v2.ResourceManagerService.CreateResource:output_type -> c1.connector.v2.CreateResourceResponse
+ 8, // 61: c1.connector.v2.ResourceManagerService.DeleteResource:output_type -> c1.connector.v2.DeleteResourceResponse
+ 10, // 62: c1.connector.v2.ResourceDeleterService.DeleteResourceV2:output_type -> c1.connector.v2.DeleteResourceV2Response
+ 12, // 63: c1.connector.v2.CredentialManagerService.RotateCredential:output_type -> c1.connector.v2.RotateCredentialResponse
+ 16, // 64: c1.connector.v2.AccountManagerService.CreateAccount:output_type -> c1.connector.v2.CreateAccountResponse
+ 57, // [57:65] is the sub-list for method output_type
+ 49, // [49:57] is the sub-list for method input_type
+ 49, // [49:49] is the sub-list for extension type_name
+ 49, // [49:49] is the sub-list for extension extendee
+ 0, // [0:49] is the sub-list for field type_name
}
func init() { file_c1_connector_v2_resource_proto_init() }
@@ -2560,9 +2716,9 @@ func file_c1_connector_v2_resource_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connector_v2_resource_proto_rawDesc), len(file_c1_connector_v2_resource_proto_rawDesc)),
NumEnums: 2,
- NumMessages: 30,
+ NumMessages: 32,
NumExtensions: 0,
- NumServices: 6,
+ NumServices: 7,
},
GoTypes: file_c1_connector_v2_resource_proto_goTypes,
DependencyIndexes: file_c1_connector_v2_resource_proto_depIdxs,
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go
index ca9162e8..2e0d07a6 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource.pb.validate.go
@@ -3875,6 +3875,373 @@ var _ interface {
ErrorName() string
} = ResourcesServiceListResourcesResponseValidationError{}
+// Validate checks the field values on ResourceGetterServiceGetResourceRequest
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *ResourceGetterServiceGetResourceRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ResourceGetterServiceGetResourceRequest with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// ResourceGetterServiceGetResourceRequestMultiError, or nil if none found.
+func (m *ResourceGetterServiceGetResourceRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceGetterServiceGetResourceRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetResourceId()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResourceId()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetParentResourceId()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParentResourceId()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceGetterServiceGetResourceRequestValidationError{
+ field: "ParentResourceId",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceGetterServiceGetResourceRequestValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ResourceGetterServiceGetResourceRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceGetterServiceGetResourceRequestMultiError is an error wrapping
+// multiple validation errors returned by
+// ResourceGetterServiceGetResourceRequest.ValidateAll() if the designated
+// constraints aren't met.
+type ResourceGetterServiceGetResourceRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceGetterServiceGetResourceRequestMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceGetterServiceGetResourceRequestMultiError) AllErrors() []error { return m }
+
+// ResourceGetterServiceGetResourceRequestValidationError is the validation
+// error returned by ResourceGetterServiceGetResourceRequest.Validate if the
+// designated constraints aren't met.
+type ResourceGetterServiceGetResourceRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceGetterServiceGetResourceRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceGetterServiceGetResourceRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceGetterServiceGetResourceRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceGetterServiceGetResourceRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceGetterServiceGetResourceRequestValidationError) ErrorName() string {
+ return "ResourceGetterServiceGetResourceRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceGetterServiceGetResourceRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceGetterServiceGetResourceRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceGetterServiceGetResourceRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceGetterServiceGetResourceRequestValidationError{}
+
+// Validate checks the field values on ResourceGetterServiceGetResourceResponse
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *ResourceGetterServiceGetResourceResponse) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ResourceGetterServiceGetResourceResponse with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// ResourceGetterServiceGetResourceResponseMultiError, or nil if none found.
+func (m *ResourceGetterServiceGetResourceResponse) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceGetterServiceGetResourceResponse) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceResponseValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceResponseValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceGetterServiceGetResourceResponseValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceGetterServiceGetResourceResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceGetterServiceGetResourceResponseValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ResourceGetterServiceGetResourceResponseMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceGetterServiceGetResourceResponseMultiError is an error wrapping
+// multiple validation errors returned by
+// ResourceGetterServiceGetResourceResponse.ValidateAll() if the designated
+// constraints aren't met.
+type ResourceGetterServiceGetResourceResponseMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceGetterServiceGetResourceResponseMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceGetterServiceGetResourceResponseMultiError) AllErrors() []error { return m }
+
+// ResourceGetterServiceGetResourceResponseValidationError is the validation
+// error returned by ResourceGetterServiceGetResourceResponse.Validate if the
+// designated constraints aren't met.
+type ResourceGetterServiceGetResourceResponseValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceGetterServiceGetResourceResponseValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceGetterServiceGetResourceResponseValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceGetterServiceGetResourceResponseValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceGetterServiceGetResourceResponseValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceGetterServiceGetResourceResponseValidationError) ErrorName() string {
+ return "ResourceGetterServiceGetResourceResponseValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceGetterServiceGetResourceResponseValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceGetterServiceGetResourceResponse.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceGetterServiceGetResourceResponseValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceGetterServiceGetResourceResponseValidationError{}
+
// Validate checks the field values on ExternalId with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
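
The validators added in this file follow the usual protoc-gen-validate split: Validate returns the first violation it hits, while ValidateAll accumulates every violation into the generated MultiError slice. A minimal sketch of how calling code might use the two, limited to names introduced above:

package example

import (
	"log"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// validateGetResourceRequest shows the fail-fast and collect-all paths for the
// new ResourceGetterServiceGetResourceRequest validators.
func validateGetResourceRequest(req *v2.ResourceGetterServiceGetResourceRequest) {
	// Validate stops at the first rule violation.
	if err := req.Validate(); err != nil {
		log.Printf("invalid request: %v", err)
	}

	// ValidateAll keeps going and wraps everything in the MultiError type.
	if err := req.ValidateAll(); err != nil {
		if multi, ok := err.(v2.ResourceGetterServiceGetResourceRequestMultiError); ok {
			for _, violation := range multi.AllErrors() {
				log.Printf("violation: %v", violation)
			}
		}
	}
}
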
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_grpc.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_grpc.pb.go
index a3718142..c1fe9d71 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_grpc.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connector/v2/resource_grpc.pb.go
@@ -218,6 +218,106 @@ var ResourcesService_ServiceDesc = grpc.ServiceDesc{
Metadata: "c1/connector/v2/resource.proto",
}
+const (
+ ResourceGetterService_GetResource_FullMethodName = "/c1.connector.v2.ResourceGetterService/GetResource"
+)
+
+// ResourceGetterServiceClient is the client API for ResourceGetterService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ResourceGetterServiceClient interface {
+ GetResource(ctx context.Context, in *ResourceGetterServiceGetResourceRequest, opts ...grpc.CallOption) (*ResourceGetterServiceGetResourceResponse, error)
+}
+
+type resourceGetterServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewResourceGetterServiceClient(cc grpc.ClientConnInterface) ResourceGetterServiceClient {
+ return &resourceGetterServiceClient{cc}
+}
+
+func (c *resourceGetterServiceClient) GetResource(ctx context.Context, in *ResourceGetterServiceGetResourceRequest, opts ...grpc.CallOption) (*ResourceGetterServiceGetResourceResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(ResourceGetterServiceGetResourceResponse)
+ err := c.cc.Invoke(ctx, ResourceGetterService_GetResource_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ResourceGetterServiceServer is the server API for ResourceGetterService service.
+// All implementations should embed UnimplementedResourceGetterServiceServer
+// for forward compatibility.
+type ResourceGetterServiceServer interface {
+ GetResource(context.Context, *ResourceGetterServiceGetResourceRequest) (*ResourceGetterServiceGetResourceResponse, error)
+}
+
+// UnimplementedResourceGetterServiceServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedResourceGetterServiceServer struct{}
+
+func (UnimplementedResourceGetterServiceServer) GetResource(context.Context, *ResourceGetterServiceGetResourceRequest) (*ResourceGetterServiceGetResourceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetResource not implemented")
+}
+func (UnimplementedResourceGetterServiceServer) testEmbeddedByValue() {}
+
+// UnsafeResourceGetterServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ResourceGetterServiceServer will
+// result in compilation errors.
+type UnsafeResourceGetterServiceServer interface {
+ mustEmbedUnimplementedResourceGetterServiceServer()
+}
+
+func RegisterResourceGetterServiceServer(s grpc.ServiceRegistrar, srv ResourceGetterServiceServer) {
+ // If the following call panics, it indicates UnimplementedResourceGetterServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&ResourceGetterService_ServiceDesc, srv)
+}
+
+func _ResourceGetterService_GetResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResourceGetterServiceGetResourceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ResourceGetterServiceServer).GetResource(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ResourceGetterService_GetResource_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ResourceGetterServiceServer).GetResource(ctx, req.(*ResourceGetterServiceGetResourceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// ResourceGetterService_ServiceDesc is the grpc.ServiceDesc for ResourceGetterService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ResourceGetterService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "c1.connector.v2.ResourceGetterService",
+ HandlerType: (*ResourceGetterServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "GetResource",
+ Handler: _ResourceGetterService_GetResource_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "c1/connector/v2/resource.proto",
+}
+
const (
ResourceManagerService_CreateResource_FullMethodName = "/c1.connector.v2.ResourceManagerService/CreateResource"
ResourceManagerService_DeleteResource_FullMethodName = "/c1.connector.v2.ResourceManagerService/DeleteResource"
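For orientation, here is a minimal sketch of how a server could satisfy the generated ResourceGetterService surface above: a hypothetical getterServer type embeds UnimplementedResourceGetterServiceServer by value (as the generated comment requires), overrides GetResource, and registers itself on a grpc.Server. The import path, the getterServer name, and the empty response are illustrative assumptions, not anything this diff ships.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	// Import path assumed from the vendored layout; adjust to your module.
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
)

// getterServer is a hypothetical implementation; embedding the
// Unimplemented struct by value keeps it forward compatible.
type getterServer struct {
	v2.UnimplementedResourceGetterServiceServer
}

func (s *getterServer) GetResource(ctx context.Context, req *v2.ResourceGetterServiceGetResourceRequest) (*v2.ResourceGetterServiceGetResourceResponse, error) {
	// A real connector would look up the requested resource here;
	// the empty response is a placeholder for illustration.
	return &v2.ResourceGetterServiceGetResourceResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	v2.RegisterResourceGetterServiceServer(srv, &getterServer{})
	log.Fatal(srv.Serve(lis))
}

On the caller side, the mirror image is NewResourceGetterServiceClient(conn).GetResource(ctx, req), which routes through ResourceGetterService_GetResource_FullMethodName as shown above.
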
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go
index 94fee00f..cc49f178 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.go
@@ -108,6 +108,8 @@ type Task struct {
// *Task_ActionGetSchema
// *Task_ActionInvoke
// *Task_ActionStatus
+ // *Task_CreateSyncDiff
+ // *Task_CompactSyncs_
TaskType isTask_TaskType `protobuf_oneof:"task_type"`
Debug bool `protobuf:"varint,3,opt,name=debug,proto3" json:"debug,omitempty"`
unknownFields protoimpl.UnknownFields
@@ -336,6 +338,24 @@ func (x *Task) GetActionStatus() *Task_ActionStatusTask {
return nil
}
+func (x *Task) GetCreateSyncDiff() *Task_CreateSyncDiffTask {
+ if x != nil {
+ if x, ok := x.TaskType.(*Task_CreateSyncDiff); ok {
+ return x.CreateSyncDiff
+ }
+ }
+ return nil
+}
+
+func (x *Task) GetCompactSyncs() *Task_CompactSyncs {
+ if x != nil {
+ if x, ok := x.TaskType.(*Task_CompactSyncs_); ok {
+ return x.CompactSyncs
+ }
+ }
+ return nil
+}
+
func (x *Task) GetDebug() bool {
if x != nil {
return x.Debug
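The new oneof members follow the standard protobuf-go pattern: the task variant is set by assigning a wrapper struct to TaskType, and each generated getter is nil-safe, returning its payload only when that variant is the one stored. A minimal sketch, assuming the vendored import path and illustrative sync IDs:

package main

import (
	"fmt"

	// Import path assumed from the vendored layout; adjust to your module.
	v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
)

func main() {
	// Set the CreateSyncDiff variant through its oneof wrapper.
	// The sync IDs are made up for illustration.
	task := &v1.Task{
		TaskType: &v1.Task_CreateSyncDiff{
			CreateSyncDiff: &v1.Task_CreateSyncDiffTask{
				BaseSyncId: "sync-base",
				NewSyncId:  "sync-new",
			},
		},
	}

	// The getter returns the payload only when this variant is stored.
	if diff := task.GetCreateSyncDiff(); diff != nil {
		fmt.Println(diff.GetBaseSyncId(), diff.GetNewSyncId())
	}

	// Any other variant's getter returns nil for this task.
	fmt.Println(task.GetCompactSyncs() == nil) // true
}
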
@@ -423,6 +443,14 @@ type Task_ActionStatus struct {
ActionStatus *Task_ActionStatusTask `protobuf:"bytes,118,opt,name=action_status,json=actionStatus,proto3,oneof"`
}
+type Task_CreateSyncDiff struct {
+ CreateSyncDiff *Task_CreateSyncDiffTask `protobuf:"bytes,119,opt,name=create_sync_diff,json=createSyncDiff,proto3,oneof"`
+}
+
+type Task_CompactSyncs_ struct {
+ CompactSyncs *Task_CompactSyncs `protobuf:"bytes,120,opt,name=compact_syncs,json=compactSyncs,proto3,oneof"`
+}
+
func (*Task_None) isTask_TaskType() {}
func (*Task_Hello) isTask_TaskType() {}
@@ -461,6 +489,10 @@ func (*Task_ActionInvoke) isTask_TaskType() {}
func (*Task_ActionStatus) isTask_TaskType() {}
+func (*Task_CreateSyncDiff) isTask_TaskType() {}
+
+func (*Task_CompactSyncs_) isTask_TaskType() {}
+
type BatonServiceHelloRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
HostId string `protobuf:"bytes,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"`
@@ -2166,6 +2198,171 @@ func (x *Task_ActionStatusTask) GetAnnotations() []*anypb.Any {
return nil
}
+type Task_CreateSyncDiffTask struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Open to suggestions here
+ BaseSyncId string `protobuf:"bytes,1,opt,name=base_sync_id,json=baseSyncId,proto3" json:"base_sync_id,omitempty"`
+ NewSyncId string `protobuf:"bytes,2,opt,name=new_sync_id,json=newSyncId,proto3" json:"new_sync_id,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Task_CreateSyncDiffTask) Reset() {
+ *x = Task_CreateSyncDiffTask{}
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Task_CreateSyncDiffTask) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Task_CreateSyncDiffTask) ProtoMessage() {}
+
+func (x *Task_CreateSyncDiffTask) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Task_CreateSyncDiffTask.ProtoReflect.Descriptor instead.
+func (*Task_CreateSyncDiffTask) Descriptor() ([]byte, []int) {
+ return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 19}
+}
+
+func (x *Task_CreateSyncDiffTask) GetBaseSyncId() string {
+ if x != nil {
+ return x.BaseSyncId
+ }
+ return ""
+}
+
+func (x *Task_CreateSyncDiffTask) GetNewSyncId() string {
+ if x != nil {
+ return x.NewSyncId
+ }
+ return ""
+}
+
+func (x *Task_CreateSyncDiffTask) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+type Task_CompactSyncs struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ CompactableSyncs []*Task_CompactSyncs_CompactableSync `protobuf:"bytes,1,rep,name=compactable_syncs,json=compactableSyncs,proto3" json:"compactable_syncs,omitempty"`
+ Annotations []*anypb.Any `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Task_CompactSyncs) Reset() {
+ *x = Task_CompactSyncs{}
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Task_CompactSyncs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Task_CompactSyncs) ProtoMessage() {}
+
+func (x *Task_CompactSyncs) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Task_CompactSyncs.ProtoReflect.Descriptor instead.
+func (*Task_CompactSyncs) Descriptor() ([]byte, []int) {
+ return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 20}
+}
+
+func (x *Task_CompactSyncs) GetCompactableSyncs() []*Task_CompactSyncs_CompactableSync {
+ if x != nil {
+ return x.CompactableSyncs
+ }
+ return nil
+}
+
+func (x *Task_CompactSyncs) GetAnnotations() []*anypb.Any {
+ if x != nil {
+ return x.Annotations
+ }
+ return nil
+}
+
+type Task_CompactSyncs_CompactableSync struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FilePath string `protobuf:"bytes,1,opt,name=file_path,json=filePath,proto3" json:"file_path,omitempty"`
+ SyncId string `protobuf:"bytes,2,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Task_CompactSyncs_CompactableSync) Reset() {
+ *x = Task_CompactSyncs_CompactableSync{}
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Task_CompactSyncs_CompactableSync) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Task_CompactSyncs_CompactableSync) ProtoMessage() {}
+
+func (x *Task_CompactSyncs_CompactableSync) ProtoReflect() protoreflect.Message {
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Task_CompactSyncs_CompactableSync.ProtoReflect.Descriptor instead.
+func (*Task_CompactSyncs_CompactableSync) Descriptor() ([]byte, []int) {
+ return file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP(), []int{0, 20, 0}
+}
+
+func (x *Task_CompactSyncs_CompactableSync) GetFilePath() string {
+ if x != nil {
+ return x.FilePath
+ }
+ return ""
+}
+
+func (x *Task_CompactSyncs_CompactableSync) GetSyncId() string {
+ if x != nil {
+ return x.SyncId
+ }
+ return ""
+}
+
type BatonServiceHelloRequest_BuildInfo struct {
state protoimpl.MessageState `protogen:"open.v1"`
LangVersion string `protobuf:"bytes,1,opt,name=lang_version,json=langVersion,proto3" json:"lang_version,omitempty"`
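Task_CompactSyncs carries a repeated nested message, so a caller builds a slice of CompactableSync entries and wraps the whole thing in the Task_CompactSyncs_ oneof wrapper. A minimal sketch under the same assumed import path, with made-up file paths and sync IDs:

package main

import (
	"fmt"

	// Import path assumed from the vendored layout; adjust to your module.
	v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
)

func main() {
	// File paths and sync IDs are illustrative only.
	compact := &v1.Task_CompactSyncs{
		CompactableSyncs: []*v1.Task_CompactSyncs_CompactableSync{
			{FilePath: "/tmp/sync-a.c1z", SyncId: "sync-a"},
			{FilePath: "/tmp/sync-b.c1z", SyncId: "sync-b"},
		},
	}

	// Wrap the message in a Task via the Task_CompactSyncs_ oneof wrapper.
	task := &v1.Task{TaskType: &v1.Task_CompactSyncs_{CompactSyncs: compact}}

	// The nested getters are nil-safe, so this loop is safe even if
	// the variant or the slice were unset.
	for _, cs := range task.GetCompactSyncs().GetCompactableSyncs() {
		fmt.Println(cs.GetSyncId(), cs.GetFilePath())
	}
}
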
@@ -2177,7 +2374,7 @@ type BatonServiceHelloRequest_BuildInfo struct {
func (x *BatonServiceHelloRequest_BuildInfo) Reset() {
*x = BatonServiceHelloRequest_BuildInfo{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2189,7 +2386,7 @@ func (x *BatonServiceHelloRequest_BuildInfo) String() string {
func (*BatonServiceHelloRequest_BuildInfo) ProtoMessage() {}
func (x *BatonServiceHelloRequest_BuildInfo) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[32]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2242,7 +2439,7 @@ type BatonServiceHelloRequest_OSInfo struct {
func (x *BatonServiceHelloRequest_OSInfo) Reset() {
*x = BatonServiceHelloRequest_OSInfo{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2254,7 +2451,7 @@ func (x *BatonServiceHelloRequest_OSInfo) String() string {
func (*BatonServiceHelloRequest_OSInfo) ProtoMessage() {}
func (x *BatonServiceHelloRequest_OSInfo) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[33]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2337,7 +2534,7 @@ type BatonServiceUploadAssetRequest_UploadMetadata struct {
func (x *BatonServiceUploadAssetRequest_UploadMetadata) Reset() {
*x = BatonServiceUploadAssetRequest_UploadMetadata{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2349,7 +2546,7 @@ func (x *BatonServiceUploadAssetRequest_UploadMetadata) String() string {
func (*BatonServiceUploadAssetRequest_UploadMetadata) ProtoMessage() {}
func (x *BatonServiceUploadAssetRequest_UploadMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[34]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2396,7 +2593,7 @@ type BatonServiceUploadAssetRequest_UploadData struct {
func (x *BatonServiceUploadAssetRequest_UploadData) Reset() {
*x = BatonServiceUploadAssetRequest_UploadData{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2408,7 +2605,7 @@ func (x *BatonServiceUploadAssetRequest_UploadData) String() string {
func (*BatonServiceUploadAssetRequest_UploadData) ProtoMessage() {}
func (x *BatonServiceUploadAssetRequest_UploadData) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[35]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2441,7 +2638,7 @@ type BatonServiceUploadAssetRequest_UploadEOF struct {
func (x *BatonServiceUploadAssetRequest_UploadEOF) Reset() {
*x = BatonServiceUploadAssetRequest_UploadEOF{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2453,7 +2650,7 @@ func (x *BatonServiceUploadAssetRequest_UploadEOF) String() string {
func (*BatonServiceUploadAssetRequest_UploadEOF) ProtoMessage() {}
func (x *BatonServiceUploadAssetRequest_UploadEOF) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[36]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[39]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2495,7 +2692,7 @@ type BatonServiceFinishTaskRequest_Error struct {
func (x *BatonServiceFinishTaskRequest_Error) Reset() {
*x = BatonServiceFinishTaskRequest_Error{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2507,7 +2704,7 @@ func (x *BatonServiceFinishTaskRequest_Error) String() string {
func (*BatonServiceFinishTaskRequest_Error) ProtoMessage() {}
func (x *BatonServiceFinishTaskRequest_Error) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[37]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[40]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2555,7 +2752,7 @@ type BatonServiceFinishTaskRequest_Success struct {
func (x *BatonServiceFinishTaskRequest_Success) Reset() {
*x = BatonServiceFinishTaskRequest_Success{}
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2567,7 +2764,7 @@ func (x *BatonServiceFinishTaskRequest_Success) String() string {
func (*BatonServiceFinishTaskRequest_Success) ProtoMessage() {}
func (x *BatonServiceFinishTaskRequest_Success) ProtoReflect() protoreflect.Message {
- mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[38]
+ mi := &file_c1_connectorapi_baton_v1_baton_proto_msgTypes[41]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2624,7 +2821,7 @@ var file_c1_connectorapi_baton_v1_baton_proto_rawDesc = string([]byte{
0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdd, 0x21, 0x0a, 0x04,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x26, 0x0a, 0x04,
0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x02, 0x69, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
@@ -2733,160 +2930,196 @@ var file_c1_connectorapi_baton_v1_baton_proto_rawDesc = string([]byte{
0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62,
0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x41, 0x63, 0x74,
0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52,
- 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a,
- 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x64, 0x65,
- 0x62, 0x75, 0x67, 0x1a, 0x42, 0x0a, 0x08, 0x4e, 0x6f, 0x6e, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12,
+ 0x0c, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a,
+ 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x64, 0x69, 0x66,
+ 0x66, 0x18, 0x77, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e,
+ 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79,
+ 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x12, 0x52, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x73, 0x18, 0x78, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54,
+ 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73,
+ 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x1a, 0x42, 0x0a, 0x08, 0x4e, 0x6f, 0x6e, 0x65, 0x54, 0x61,
+ 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x43, 0x0a, 0x09, 0x48, 0x65,
+ 0x6c, 0x6c, 0x6f, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
+ 0x46, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x46, 0x75, 0x6c, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x12,
0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x43, 0x0a, 0x09, 0x48, 0x65, 0x6c, 0x6c, 0x6f,
- 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
- 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x46, 0x0a, 0x0c,
- 0x53, 0x79, 0x6e, 0x63, 0x46, 0x75, 0x6c, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x7e, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x65,
- 0x64, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
- 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a,
- 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x73, 0x74, 0x61,
- 0x72, 0x74, 0x41, 0x74, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x54, 0x61,
- 0x73, 0x6b, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x7e, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74,
+ 0x46, 0x65, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x35, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x74, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x47, 0x72, 0x61, 0x6e,
+ 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x6c,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70,
+ 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x36,
+ 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x72, 0x0a,
+ 0x0a, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x2c, 0x0a, 0x05, 0x67,
+ 0x72, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61,
+ 0x6e, 0x74, 0x52, 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x1a, 0xf9, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3f, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e,
+ 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
+ 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x4b, 0x0a,
+ 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54,
+ 0x61, 0x73, 0x6b, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0b, 0x61,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x52, 0x0a, 0x12, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b,
+ 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x1a, 0xfa,
+ 0x01, 0x0a, 0x15, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xd5, 0x01, 0x0a, 0x10,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b,
+ 0x12, 0x45, 0x0a, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65,
+ 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32,
+ 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x74,
+ 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61,
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x72, 0x0a, 0x0a, 0x52, 0x65,
- 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x2c, 0x0a, 0x05, 0x67, 0x72, 0x61, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e,
- 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x61, 0x6e, 0x74, 0x52,
- 0x05, 0x67, 0x72, 0x61, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x15, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x58, 0x0a, 0x0f,
+ 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
+ 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b,
+ 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x12, 0x42, 0x75, 0x6c, 0x6b, 0x47, 0x65,
+ 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x55, 0x0a, 0x0f,
+ 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31,
+ 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54,
+ 0x61, 0x73, 0x6b, 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65,
+ 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x64, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65,
+ 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74,
+ 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4f, 0x0a, 0x15, 0x41, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x54,
+ 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x13, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61,
+ 0x73, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
- 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xf9,
- 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3f, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
- 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
- 0x69, 0x61, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
- 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63,
- 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0x4b, 0x0a, 0x12, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b,
- 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x52, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3c, 0x0a,
- 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52,
- 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x1a, 0xfa, 0x01, 0x0a, 0x15,
- 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
- 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3c, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x31, 0x2e,
- 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x49, 0x64, 0x12, 0x51, 0x0a, 0x12, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
- 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76,
- 0x32, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x11, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xd5, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x45, 0x0a,
- 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0d, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x31,
- 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69,
- 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x74, 0x69, 0x63, 0x6b,
- 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8b,
+ 0x01, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x54,
+ 0x61, 0x73, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04,
+ 0x61, 0x72, 0x67, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6e, 0x0a, 0x10,
+ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8e, 0x01, 0x0a,
+ 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x44, 0x69, 0x66, 0x66, 0x54,
+ 0x61, 0x73, 0x6b, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x53,
+ 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x79, 0x6e,
+ 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x77, 0x53,
+ 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
+ 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xf9, 0x01,
+ 0x0a, 0x0c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x68,
+ 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x79,
+ 0x6e, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, 0x31, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f,
+ 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63,
+ 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62,
+ 0x6c, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61,
+ 0x62, 0x6c, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x1a, 0x71, 0x0a, 0x15, 0x42, 0x75, 0x6c, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x58, 0x0a, 0x0f, 0x74, 0x69, 0x63,
- 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61,
- 0x73, 0x6b, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54,
- 0x61, 0x73, 0x6b, 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x12, 0x42, 0x75, 0x6c, 0x6b, 0x47, 0x65, 0x74, 0x54, 0x69,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x55, 0x0a, 0x0f, 0x74, 0x69, 0x63,
- 0x6b, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f,
- 0x72, 0x61, 0x70, 0x69, 0x2e, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61,
- 0x73, 0x6b, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b,
- 0x52, 0x0e, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73,
- 0x1a, 0x4f, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e,
- 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x1a, 0x64, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61,
- 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12,
- 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4f, 0x0a, 0x15, 0x41, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x54, 0x61, 0x73, 0x6b,
- 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e,
- 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x61, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x73, 0x6b, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x10,
- 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x61, 0x73, 0x6b,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67,
- 0x73, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6e, 0x0a, 0x10, 0x41, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
- 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x73, 0x0a, 0x06, 0x53, 0x74, 0x61,
+ 0x1a, 0x47, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53,
+ 0x79, 0x6e, 0x63, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x73, 0x0a, 0x06, 0x53, 0x74, 0x61,
0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e,
0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53,
0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12,
@@ -3174,7 +3407,7 @@ func file_c1_connectorapi_baton_v1_baton_proto_rawDescGZIP() []byte {
}
var file_c1_connectorapi_baton_v1_baton_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 39)
+var file_c1_connectorapi_baton_v1_baton_proto_msgTypes = make([]protoimpl.MessageInfo, 42)
var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{
(Task_Status)(0), // 0: c1.connectorapi.baton.v1.Task.Status
(*Task)(nil), // 1: c1.connectorapi.baton.v1.Task
@@ -3209,28 +3442,31 @@ var file_c1_connectorapi_baton_v1_baton_proto_goTypes = []any{
(*Task_ActionGetSchemaTask)(nil), // 30: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask
(*Task_ActionInvokeTask)(nil), // 31: c1.connectorapi.baton.v1.Task.ActionInvokeTask
(*Task_ActionStatusTask)(nil), // 32: c1.connectorapi.baton.v1.Task.ActionStatusTask
- (*BatonServiceHelloRequest_BuildInfo)(nil), // 33: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo
- (*BatonServiceHelloRequest_OSInfo)(nil), // 34: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo
- (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata
- (*BatonServiceUploadAssetRequest_UploadData)(nil), // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData
- (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF
- (*BatonServiceFinishTaskRequest_Error)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error
- (*BatonServiceFinishTaskRequest_Success)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success
- (*v2.ConnectorMetadata)(nil), // 40: c1.connector.v2.ConnectorMetadata
- (*anypb.Any)(nil), // 41: google.protobuf.Any
- (*durationpb.Duration)(nil), // 42: google.protobuf.Duration
- (*status.Status)(nil), // 43: google.rpc.Status
- (*timestamppb.Timestamp)(nil), // 44: google.protobuf.Timestamp
- (*v2.Entitlement)(nil), // 45: c1.connector.v2.Entitlement
- (*v2.Resource)(nil), // 46: c1.connector.v2.Resource
- (*v2.Grant)(nil), // 47: c1.connector.v2.Grant
- (*v2.AccountInfo)(nil), // 48: c1.connector.v2.AccountInfo
- (*v2.CredentialOptions)(nil), // 49: c1.connector.v2.CredentialOptions
- (*v2.EncryptionConfig)(nil), // 50: c1.connector.v2.EncryptionConfig
- (*v2.ResourceId)(nil), // 51: c1.connector.v2.ResourceId
- (*v2.TicketRequest)(nil), // 52: c1.connector.v2.TicketRequest
- (*v2.TicketSchema)(nil), // 53: c1.connector.v2.TicketSchema
- (*structpb.Struct)(nil), // 54: google.protobuf.Struct
+ (*Task_CreateSyncDiffTask)(nil), // 33: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask
+ (*Task_CompactSyncs)(nil), // 34: c1.connectorapi.baton.v1.Task.CompactSyncs
+ (*Task_CompactSyncs_CompactableSync)(nil), // 35: c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync
+ (*BatonServiceHelloRequest_BuildInfo)(nil), // 36: c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo
+ (*BatonServiceHelloRequest_OSInfo)(nil), // 37: c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo
+ (*BatonServiceUploadAssetRequest_UploadMetadata)(nil), // 38: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata
+ (*BatonServiceUploadAssetRequest_UploadData)(nil), // 39: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData
+ (*BatonServiceUploadAssetRequest_UploadEOF)(nil), // 40: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF
+ (*BatonServiceFinishTaskRequest_Error)(nil), // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error
+ (*BatonServiceFinishTaskRequest_Success)(nil), // 42: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success
+ (*v2.ConnectorMetadata)(nil), // 43: c1.connector.v2.ConnectorMetadata
+ (*anypb.Any)(nil), // 44: google.protobuf.Any
+ (*durationpb.Duration)(nil), // 45: google.protobuf.Duration
+ (*status.Status)(nil), // 46: google.rpc.Status
+ (*timestamppb.Timestamp)(nil), // 47: google.protobuf.Timestamp
+ (*v2.Entitlement)(nil), // 48: c1.connector.v2.Entitlement
+ (*v2.Resource)(nil), // 49: c1.connector.v2.Resource
+ (*v2.Grant)(nil), // 50: c1.connector.v2.Grant
+ (*v2.AccountInfo)(nil), // 51: c1.connector.v2.AccountInfo
+ (*v2.CredentialOptions)(nil), // 52: c1.connector.v2.CredentialOptions
+ (*v2.EncryptionConfig)(nil), // 53: c1.connector.v2.EncryptionConfig
+ (*v2.ResourceId)(nil), // 54: c1.connector.v2.ResourceId
+ (*v2.TicketRequest)(nil), // 55: c1.connector.v2.TicketRequest
+ (*v2.TicketSchema)(nil), // 56: c1.connector.v2.TicketSchema
+ (*structpb.Struct)(nil), // 57: google.protobuf.Struct
}
var file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{
0, // 0: c1.connectorapi.baton.v1.Task.status:type_name -> c1.connectorapi.baton.v1.Task.Status
@@ -3253,80 +3489,85 @@ var file_c1_connectorapi_baton_v1_baton_proto_depIdxs = []int32{
30, // 17: c1.connectorapi.baton.v1.Task.action_get_schema:type_name -> c1.connectorapi.baton.v1.Task.ActionGetSchemaTask
31, // 18: c1.connectorapi.baton.v1.Task.action_invoke:type_name -> c1.connectorapi.baton.v1.Task.ActionInvokeTask
32, // 19: c1.connectorapi.baton.v1.Task.action_status:type_name -> c1.connectorapi.baton.v1.Task.ActionStatusTask
- 33, // 20: c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo
- 34, // 21: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo
- 40, // 22: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata
- 41, // 23: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any
- 41, // 24: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any
- 1, // 25: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task
- 42, // 26: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration
- 42, // 27: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration
- 41, // 28: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any
- 41, // 29: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any
- 42, // 30: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration
- 41, // 31: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any
- 35, // 32: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata
- 36, // 33: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData
- 37, // 34: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF
- 41, // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any
- 43, // 36: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status
- 38, // 37: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error
- 39, // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success
- 41, // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any
- 41, // 40: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any
- 41, // 41: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any
- 41, // 42: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any
- 41, // 43: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any
- 44, // 44: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp
- 45, // 45: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement
- 46, // 46: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource
- 41, // 47: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any
- 42, // 48: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration
- 47, // 49: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant
- 41, // 50: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any
- 48, // 51: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo
- 49, // 52: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions
- 50, // 53: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
- 46, // 54: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource
- 51, // 55: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId
- 51, // 56: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId
- 49, // 57: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions
- 50, // 58: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
- 52, // 59: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest
- 53, // 60: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema
- 41, // 61: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any
- 24, // 62: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask
- 28, // 63: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask
- 41, // 64: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any
- 41, // 65: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any
- 41, // 66: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any
- 41, // 67: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any
- 54, // 68: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct
- 41, // 69: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any
- 41, // 70: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any
- 41, // 71: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any
- 41, // 72: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any
- 41, // 73: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any
- 41, // 74: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any
- 41, // 75: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any
- 41, // 76: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any
- 2, // 77: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest
- 4, // 78: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest
- 6, // 79: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest
- 10, // 80: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest
- 8, // 81: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest
- 12, // 82: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest
- 3, // 83: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse
- 5, // 84: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse
- 7, // 85: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse
- 11, // 86: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse
- 9, // 87: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse
- 13, // 88: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse
- 83, // [83:89] is the sub-list for method output_type
- 77, // [77:83] is the sub-list for method input_type
- 77, // [77:77] is the sub-list for extension type_name
- 77, // [77:77] is the sub-list for extension extendee
- 0, // [0:77] is the sub-list for field type_name
+ 33, // 20: c1.connectorapi.baton.v1.Task.create_sync_diff:type_name -> c1.connectorapi.baton.v1.Task.CreateSyncDiffTask
+ 34, // 21: c1.connectorapi.baton.v1.Task.compact_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs
+ 36, // 22: c1.connectorapi.baton.v1.BatonServiceHelloRequest.build_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.BuildInfo
+ 37, // 23: c1.connectorapi.baton.v1.BatonServiceHelloRequest.os_info:type_name -> c1.connectorapi.baton.v1.BatonServiceHelloRequest.OSInfo
+ 43, // 24: c1.connectorapi.baton.v1.BatonServiceHelloRequest.connector_metadata:type_name -> c1.connector.v2.ConnectorMetadata
+ 44, // 25: c1.connectorapi.baton.v1.BatonServiceHelloRequest.annotations:type_name -> google.protobuf.Any
+ 44, // 26: c1.connectorapi.baton.v1.BatonServiceHelloResponse.annotations:type_name -> google.protobuf.Any
+ 1, // 27: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.task:type_name -> c1.connectorapi.baton.v1.Task
+ 45, // 28: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_poll:type_name -> google.protobuf.Duration
+ 45, // 29: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.next_heartbeat:type_name -> google.protobuf.Duration
+ 44, // 30: c1.connectorapi.baton.v1.BatonServiceGetTaskResponse.annotations:type_name -> google.protobuf.Any
+ 44, // 31: c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest.annotations:type_name -> google.protobuf.Any
+ 45, // 32: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.next_heartbeat:type_name -> google.protobuf.Duration
+ 44, // 33: c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse.annotations:type_name -> google.protobuf.Any
+ 38, // 34: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.metadata:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata
+ 39, // 35: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.data:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadData
+ 40, // 36: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.eof:type_name -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF
+ 44, // 37: c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse.annotations:type_name -> google.protobuf.Any
+ 46, // 38: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.status:type_name -> google.rpc.Status
+ 41, // 39: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.error:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error
+ 42, // 40: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.success:type_name -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success
+ 44, // 41: c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse.annotations:type_name -> google.protobuf.Any
+ 44, // 42: c1.connectorapi.baton.v1.Task.NoneTask.annotations:type_name -> google.protobuf.Any
+ 44, // 43: c1.connectorapi.baton.v1.Task.HelloTask.annotations:type_name -> google.protobuf.Any
+ 44, // 44: c1.connectorapi.baton.v1.Task.SyncFullTask.annotations:type_name -> google.protobuf.Any
+ 44, // 45: c1.connectorapi.baton.v1.Task.EventFeedTask.annotations:type_name -> google.protobuf.Any
+ 47, // 46: c1.connectorapi.baton.v1.Task.EventFeedTask.start_at:type_name -> google.protobuf.Timestamp
+ 48, // 47: c1.connectorapi.baton.v1.Task.GrantTask.entitlement:type_name -> c1.connector.v2.Entitlement
+ 49, // 48: c1.connectorapi.baton.v1.Task.GrantTask.principal:type_name -> c1.connector.v2.Resource
+ 44, // 49: c1.connectorapi.baton.v1.Task.GrantTask.annotations:type_name -> google.protobuf.Any
+ 45, // 50: c1.connectorapi.baton.v1.Task.GrantTask.duration:type_name -> google.protobuf.Duration
+ 50, // 51: c1.connectorapi.baton.v1.Task.RevokeTask.grant:type_name -> c1.connector.v2.Grant
+ 44, // 52: c1.connectorapi.baton.v1.Task.RevokeTask.annotations:type_name -> google.protobuf.Any
+ 51, // 53: c1.connectorapi.baton.v1.Task.CreateAccountTask.account_info:type_name -> c1.connector.v2.AccountInfo
+ 52, // 54: c1.connectorapi.baton.v1.Task.CreateAccountTask.credential_options:type_name -> c1.connector.v2.CredentialOptions
+ 53, // 55: c1.connectorapi.baton.v1.Task.CreateAccountTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
+ 49, // 56: c1.connectorapi.baton.v1.Task.CreateResourceTask.resource:type_name -> c1.connector.v2.Resource
+ 54, // 57: c1.connectorapi.baton.v1.Task.DeleteResourceTask.resource_id:type_name -> c1.connector.v2.ResourceId
+ 54, // 58: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.resource_id:type_name -> c1.connector.v2.ResourceId
+ 52, // 59: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.credential_options:type_name -> c1.connector.v2.CredentialOptions
+ 53, // 60: c1.connectorapi.baton.v1.Task.RotateCredentialsTask.encryption_configs:type_name -> c1.connector.v2.EncryptionConfig
+ 55, // 61: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_request:type_name -> c1.connector.v2.TicketRequest
+ 56, // 62: c1.connectorapi.baton.v1.Task.CreateTicketTask.ticket_schema:type_name -> c1.connector.v2.TicketSchema
+ 44, // 63: c1.connectorapi.baton.v1.Task.CreateTicketTask.annotations:type_name -> google.protobuf.Any
+ 24, // 64: c1.connectorapi.baton.v1.Task.BulkCreateTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.CreateTicketTask
+ 28, // 65: c1.connectorapi.baton.v1.Task.BulkGetTicketsTask.ticket_requests:type_name -> c1.connectorapi.baton.v1.Task.GetTicketTask
+ 44, // 66: c1.connectorapi.baton.v1.Task.ListTicketSchemasTask.annotations:type_name -> google.protobuf.Any
+ 44, // 67: c1.connectorapi.baton.v1.Task.GetTicketTask.annotations:type_name -> google.protobuf.Any
+ 44, // 68: c1.connectorapi.baton.v1.Task.ActionListSchemasTask.annotations:type_name -> google.protobuf.Any
+ 44, // 69: c1.connectorapi.baton.v1.Task.ActionGetSchemaTask.annotations:type_name -> google.protobuf.Any
+ 57, // 70: c1.connectorapi.baton.v1.Task.ActionInvokeTask.args:type_name -> google.protobuf.Struct
+ 44, // 71: c1.connectorapi.baton.v1.Task.ActionInvokeTask.annotations:type_name -> google.protobuf.Any
+ 44, // 72: c1.connectorapi.baton.v1.Task.ActionStatusTask.annotations:type_name -> google.protobuf.Any
+ 44, // 73: c1.connectorapi.baton.v1.Task.CreateSyncDiffTask.annotations:type_name -> google.protobuf.Any
+ 35, // 74: c1.connectorapi.baton.v1.Task.CompactSyncs.compactable_syncs:type_name -> c1.connectorapi.baton.v1.Task.CompactSyncs.CompactableSync
+ 44, // 75: c1.connectorapi.baton.v1.Task.CompactSyncs.annotations:type_name -> google.protobuf.Any
+ 44, // 76: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadMetadata.annotations:type_name -> google.protobuf.Any
+ 44, // 77: c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest.UploadEOF.annotations:type_name -> google.protobuf.Any
+ 44, // 78: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.annotations:type_name -> google.protobuf.Any
+ 44, // 79: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Error.response:type_name -> google.protobuf.Any
+ 44, // 80: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.annotations:type_name -> google.protobuf.Any
+ 44, // 81: c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest.Success.response:type_name -> google.protobuf.Any
+ 2, // 82: c1.connectorapi.baton.v1.BatonService.Hello:input_type -> c1.connectorapi.baton.v1.BatonServiceHelloRequest
+ 4, // 83: c1.connectorapi.baton.v1.BatonService.GetTask:input_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskRequest
+ 6, // 84: c1.connectorapi.baton.v1.BatonService.Heartbeat:input_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatRequest
+ 10, // 85: c1.connectorapi.baton.v1.BatonService.FinishTask:input_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskRequest
+ 8, // 86: c1.connectorapi.baton.v1.BatonService.UploadAsset:input_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetRequest
+ 12, // 87: c1.connectorapi.baton.v1.BatonService.StartDebugging:input_type -> c1.connectorapi.baton.v1.StartDebuggingRequest
+ 3, // 88: c1.connectorapi.baton.v1.BatonService.Hello:output_type -> c1.connectorapi.baton.v1.BatonServiceHelloResponse
+ 5, // 89: c1.connectorapi.baton.v1.BatonService.GetTask:output_type -> c1.connectorapi.baton.v1.BatonServiceGetTaskResponse
+ 7, // 90: c1.connectorapi.baton.v1.BatonService.Heartbeat:output_type -> c1.connectorapi.baton.v1.BatonServiceHeartbeatResponse
+ 11, // 91: c1.connectorapi.baton.v1.BatonService.FinishTask:output_type -> c1.connectorapi.baton.v1.BatonServiceFinishTaskResponse
+ 9, // 92: c1.connectorapi.baton.v1.BatonService.UploadAsset:output_type -> c1.connectorapi.baton.v1.BatonServiceUploadAssetResponse
+ 13, // 93: c1.connectorapi.baton.v1.BatonService.StartDebugging:output_type -> c1.connectorapi.baton.v1.StartDebuggingResponse
+ 88, // [88:94] is the sub-list for method output_type
+ 82, // [82:88] is the sub-list for method input_type
+ 82, // [82:82] is the sub-list for extension type_name
+ 82, // [82:82] is the sub-list for extension extendee
+ 0, // [0:82] is the sub-list for field type_name
}
func init() { file_c1_connectorapi_baton_v1_baton_proto_init() }
@@ -3354,6 +3595,8 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() {
(*Task_ActionGetSchema)(nil),
(*Task_ActionInvoke)(nil),
(*Task_ActionStatus)(nil),
+ (*Task_CreateSyncDiff)(nil),
+ (*Task_CompactSyncs_)(nil),
}
file_c1_connectorapi_baton_v1_baton_proto_msgTypes[7].OneofWrappers = []any{
(*BatonServiceUploadAssetRequest_Metadata)(nil),
@@ -3370,7 +3613,7 @@ func file_c1_connectorapi_baton_v1_baton_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_c1_connectorapi_baton_v1_baton_proto_rawDesc), len(file_c1_connectorapi_baton_v1_baton_proto_rawDesc)),
NumEnums: 1,
- NumMessages: 39,
+ NumMessages: 42,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go
index da7fa9c8..15741a2a 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1/baton.pb.validate.go
@@ -842,6 +842,88 @@ func (m *Task) validate(all bool) error {
}
}
+ case *Task_CreateSyncDiff:
+ if v == nil {
+ err := TaskValidationError{
+ field: "TaskType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetCreateSyncDiff()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TaskValidationError{
+ field: "CreateSyncDiff",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TaskValidationError{
+ field: "CreateSyncDiff",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCreateSyncDiff()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TaskValidationError{
+ field: "CreateSyncDiff",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Task_CompactSyncs_:
+ if v == nil {
+ err := TaskValidationError{
+ field: "TaskType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetCompactSyncs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TaskValidationError{
+ field: "CompactSyncs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TaskValidationError{
+ field: "CompactSyncs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCompactSyncs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TaskValidationError{
+ field: "CompactSyncs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
default:
_ = v // ensures v is used
}
@@ -5932,6 +6014,425 @@ var _ interface {
ErrorName() string
} = Task_ActionStatusTaskValidationError{}
+// Validate checks the field values on Task_CreateSyncDiffTask with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Task_CreateSyncDiffTask) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Task_CreateSyncDiffTask with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Task_CreateSyncDiffTaskMultiError, or nil if none found.
+func (m *Task_CreateSyncDiffTask) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Task_CreateSyncDiffTask) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BaseSyncId
+
+ // no validation rules for NewSyncId
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Task_CreateSyncDiffTaskValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Task_CreateSyncDiffTaskValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Task_CreateSyncDiffTaskValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Task_CreateSyncDiffTaskMultiError(errors)
+ }
+
+ return nil
+}
+
+// Task_CreateSyncDiffTaskMultiError is an error wrapping multiple validation
+// errors returned by Task_CreateSyncDiffTask.ValidateAll() if the designated
+// constraints aren't met.
+type Task_CreateSyncDiffTaskMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Task_CreateSyncDiffTaskMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Task_CreateSyncDiffTaskMultiError) AllErrors() []error { return m }
+
+// Task_CreateSyncDiffTaskValidationError is the validation error returned by
+// Task_CreateSyncDiffTask.Validate if the designated constraints aren't met.
+type Task_CreateSyncDiffTaskValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Task_CreateSyncDiffTaskValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Task_CreateSyncDiffTaskValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Task_CreateSyncDiffTaskValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Task_CreateSyncDiffTaskValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Task_CreateSyncDiffTaskValidationError) ErrorName() string {
+ return "Task_CreateSyncDiffTaskValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Task_CreateSyncDiffTaskValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTask_CreateSyncDiffTask.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Task_CreateSyncDiffTaskValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Task_CreateSyncDiffTaskValidationError{}
+
+// Validate checks the field values on Task_CompactSyncs with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Task_CompactSyncs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Task_CompactSyncs with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Task_CompactSyncsMultiError, or nil if none found.
+func (m *Task_CompactSyncs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Task_CompactSyncs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetCompactableSyncs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("CompactableSyncs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("CompactableSyncs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("CompactableSyncs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetAnnotations() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Task_CompactSyncsValidationError{
+ field: fmt.Sprintf("Annotations[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Task_CompactSyncsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Task_CompactSyncsMultiError is an error wrapping multiple validation errors
+// returned by Task_CompactSyncs.ValidateAll() if the designated constraints
+// aren't met.
+type Task_CompactSyncsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Task_CompactSyncsMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Task_CompactSyncsMultiError) AllErrors() []error { return m }
+
+// Task_CompactSyncsValidationError is the validation error returned by
+// Task_CompactSyncs.Validate if the designated constraints aren't met.
+type Task_CompactSyncsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Task_CompactSyncsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Task_CompactSyncsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Task_CompactSyncsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Task_CompactSyncsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Task_CompactSyncsValidationError) ErrorName() string {
+ return "Task_CompactSyncsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Task_CompactSyncsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTask_CompactSyncs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Task_CompactSyncsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Task_CompactSyncsValidationError{}
+
+// Validate checks the field values on Task_CompactSyncs_CompactableSync with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *Task_CompactSyncs_CompactableSync) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Task_CompactSyncs_CompactableSync
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// Task_CompactSyncs_CompactableSyncMultiError, or nil if none found.
+func (m *Task_CompactSyncs_CompactableSync) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Task_CompactSyncs_CompactableSync) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for FilePath
+
+ // no validation rules for SyncId
+
+ if len(errors) > 0 {
+ return Task_CompactSyncs_CompactableSyncMultiError(errors)
+ }
+
+ return nil
+}
+
+// Task_CompactSyncs_CompactableSyncMultiError is an error wrapping multiple
+// validation errors returned by
+// Task_CompactSyncs_CompactableSync.ValidateAll() if the designated
+// constraints aren't met.
+type Task_CompactSyncs_CompactableSyncMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Task_CompactSyncs_CompactableSyncMultiError) Error() string {
+ msgs := make([]string, 0, len(m))
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Task_CompactSyncs_CompactableSyncMultiError) AllErrors() []error { return m }
+
+// Task_CompactSyncs_CompactableSyncValidationError is the validation error
+// returned by Task_CompactSyncs_CompactableSync.Validate if the designated
+// constraints aren't met.
+type Task_CompactSyncs_CompactableSyncValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Task_CompactSyncs_CompactableSyncValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Task_CompactSyncs_CompactableSyncValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Task_CompactSyncs_CompactableSyncValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Task_CompactSyncs_CompactableSyncValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Task_CompactSyncs_CompactableSyncValidationError) ErrorName() string {
+ return "Task_CompactSyncs_CompactableSyncValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Task_CompactSyncs_CompactableSyncValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTask_CompactSyncs_CompactableSync.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Task_CompactSyncs_CompactableSyncValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Task_CompactSyncs_CompactableSyncValidationError{}
+
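As an aside on how these generated validators are typically consumed: Validate stops at the first violation, while ValidateAll collects every violation into the corresponding MultiError type. A minimal, hedged sketch follows (the task values, function name, and the batonv1 alias are illustrative, not part of this patch):

// sketch (illustrative only); package clause omitted for brevity
import (
	"errors"
	"fmt"

	batonv1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
)

func inspectCompactSyncsViolations() {
	task := &batonv1.Task_CompactSyncs{
		CompactableSyncs: []*batonv1.Task_CompactSyncs_CompactableSync{
			{FilePath: "/tmp/sync-1.c1z", SyncId: "sync-1"}, // hypothetical values
		},
	}
	// ValidateAll gathers every violation; Validate would return only the first.
	if err := task.ValidateAll(); err != nil {
		var merr batonv1.Task_CompactSyncsMultiError
		if errors.As(err, &merr) {
			for _, violation := range merr.AllErrors() {
				fmt.Println(violation)
			}
		}
	}
}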
// Validate checks the field values on BatonServiceHelloRequest_BuildInfo with
// the rules defined in the proto definition for this message. If any rules
// are violated, the first error encountered is returned, or nil if there are
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go
index 9b50c16f..ec106f8e 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.go
@@ -30,6 +30,8 @@ type SyncRun struct {
StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"`
EndedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=ended_at,json=endedAt,proto3" json:"ended_at,omitempty"`
SyncToken string `protobuf:"bytes,4,opt,name=sync_token,json=syncToken,proto3" json:"sync_token,omitempty"`
+ SyncType string `protobuf:"bytes,5,opt,name=sync_type,json=syncType,proto3" json:"sync_type,omitempty"`
+ ParentSyncId string `protobuf:"bytes,6,opt,name=parent_sync_id,json=parentSyncId,proto3" json:"parent_sync_id,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -92,6 +94,20 @@ func (x *SyncRun) GetSyncToken() string {
return ""
}
+func (x *SyncRun) GetSyncType() string {
+ if x != nil {
+ return x.SyncType
+ }
+ return ""
+}
+
+func (x *SyncRun) GetParentSyncId() string {
+ if x != nil {
+ return x.ParentSyncId
+ }
+ return ""
+}
+
type SyncsReaderServiceGetSyncRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
SyncId string `protobuf:"bytes,1,opt,name=sync_id,json=syncId,proto3" json:"sync_id,omitempty"`
@@ -319,6 +335,7 @@ func (x *SyncsReaderServiceListSyncsResponse) GetAnnotations() []*anypb.Any {
type SyncsReaderServiceGetLatestFinishedSyncRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Annotations []*anypb.Any `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty"`
+ SyncType string `protobuf:"bytes,2,opt,name=sync_type,json=syncType,proto3" json:"sync_type,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -360,6 +377,13 @@ func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetAnnotations() []*any
return nil
}
+func (x *SyncsReaderServiceGetLatestFinishedSyncRequest) GetSyncType() string {
+ if x != nil {
+ return x.SyncType
+ }
+ return ""
+}
+
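The new sync_type and parent_sync_id fields are plain strings with no validation constraints. A short, hedged sketch of how a reader-side caller might use them (the function names and concrete values are assumptions, not defined by this patch):

// sketch (illustrative only); package clause omitted for brevity
import (
	readerv2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2"
)

// latestFinishedSyncRequest filters the latest finished sync by type; an empty
// syncType presumably preserves the previous, unfiltered behavior.
func latestFinishedSyncRequest(syncType string) *readerv2.SyncsReaderServiceGetLatestFinishedSyncRequest {
	return &readerv2.SyncsReaderServiceGetLatestFinishedSyncRequest{SyncType: syncType}
}

// describeRun exercises the new getters; GetParentSyncId presumably links a
// derived (e.g. diff) sync back to the run it was based on.
func describeRun(run *readerv2.SyncRun) string {
	return run.GetSyncToken() + " type=" + run.GetSyncType() + " parent=" + run.GetParentSyncId()
}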
type SyncsReaderServiceGetLatestFinishedSyncResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Sync *SyncRun `protobuf:"bytes,1,opt,name=sync,proto3" json:"sync,omitempty"`
@@ -422,7 +446,7 @@ var file_c1_reader_v2_sync_proto_rawDesc = string([]byte{
0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaa, 0x01, 0x0a,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x01, 0x0a,
0x07, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
@@ -433,91 +457,97 @@ var file_c1_reader_v2_sync_proto_rawDesc = string([]byte{
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x79,
0x6e, 0x63, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
- 0x73, 0x79, 0x6e, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x73, 0x0a, 0x20, 0x53, 0x79, 0x6e,
- 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47,
- 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a,
- 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e,
- 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86,
- 0x01, 0x0a, 0x21, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76,
- 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x12,
- 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x53, 0x79, 0x6e, 0x63,
- 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69,
- 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27,
- 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01, 0x52, 0x08, 0x70,
- 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a,
- 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
- 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc1, 0x01,
- 0x0a, 0x23, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x79, 0x6e, 0x63, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x05, 0x73, 0x79, 0x6e,
- 0x63, 0x73, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a,
- 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74,
- 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e,
- 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
+ 0x73, 0x79, 0x6e, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x6e,
+ 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79,
+ 0x6e, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x22, 0x73, 0x0a, 0x20,
+ 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x22, 0x68, 0x0a, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74,
- 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x94, 0x01, 0x0a, 0x2f,
+ 0x73, 0x22, 0x86, 0x01, 0x0a, 0x21, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79,
+ 0x6e, 0x63, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x53,
+ 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x27, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x2a, 0x05, 0x18, 0xfa, 0x01, 0x40, 0x01,
+ 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2c, 0x0a, 0x0a, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d,
+ 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x22, 0xc1, 0x01, 0x0a, 0x23, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x73, 0x79, 0x6e, 0x63,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x05,
+ 0x73, 0x79, 0x6e, 0x63, 0x73, 0x12, 0x35, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d,
+ 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x20, 0x01, 0x28, 0x80, 0x10, 0xd0, 0x01, 0x01, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x36, 0x0a, 0x0b,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61,
+ 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x6e, 0x63, 0x54, 0x79, 0x70, 0x65, 0x22, 0x94, 0x01, 0x0a,
+ 0x2f, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69,
+ 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
+ 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79,
+ 0x6e, 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x36, 0x0a, 0x0b, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x32, 0x89, 0x03, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6a, 0x0a, 0x07, 0x47, 0x65,
+ 0x74, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79,
+ 0x6e, 0x63, 0x73, 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e,
+ 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74,
+ 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79,
+ 0x6e, 0x63, 0x12, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76,
+ 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e,
+ 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e,
0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73,
- 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x29, 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e,
- 0x63, 0x52, 0x75, 0x6e, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x12, 0x36, 0x0a, 0x0b, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x32, 0x89, 0x03, 0x0a, 0x12, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6a, 0x0a, 0x07, 0x47, 0x65, 0x74,
- 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2e, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e,
- 0x63, 0x73, 0x12, 0x30, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76,
- 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x94, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4c,
- 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e,
- 0x63, 0x12, 0x3c, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32,
- 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69,
- 0x73, 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x3d, 0x2e, 0x63, 0x31, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x76, 0x32, 0x2e, 0x53,
- 0x79, 0x6e, 0x63, 0x73, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68,
- 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33,
- 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e,
- 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e, 0x2d,
- 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x68, 0x65, 0x64, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42,
+ 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x64, 0x75, 0x63, 0x74, 0x6f, 0x72, 0x6f, 0x6e, 0x65, 0x2f, 0x62, 0x61, 0x74, 0x6f, 0x6e,
+ 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x31, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x2f, 0x76, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
diff --git a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.validate.go b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.validate.go
index 13702f99..b87264e2 100644
--- a/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.validate.go
+++ b/vendor/github.com/conductorone/baton-sdk/pb/c1/reader/v2/sync.pb.validate.go
@@ -118,6 +118,10 @@ func (m *SyncRun) validate(all bool) error {
// no validation rules for SyncToken
+ // no validation rules for SyncType
+
+ // no validation rules for ParentSyncId
+
if len(errors) > 0 {
return SyncRunMultiError(errors)
}
@@ -919,6 +923,8 @@ func (m *SyncsReaderServiceGetLatestFinishedSyncRequest) validate(all bool) erro
}
+ // no validation rules for SyncType
+
if len(errors) > 0 {
return SyncsReaderServiceGetLatestFinishedSyncRequestMultiError(errors)
}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go
index 21fdaf07..5c0bf9ef 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/cli/commands.go
@@ -131,7 +131,7 @@ func MakeMainCommand[T field.Configurable](
v.GetString("revoke-grant"),
))
case v.GetBool("event-feed"):
- opts = append(opts, connectorrunner.WithOnDemandEventStream())
+ opts = append(opts, connectorrunner.WithOnDemandEventStream(v.GetString("event-feed-id"), v.GetTime("event-feed-start-at")))
case v.GetString("create-account-profile") != "":
profileMap := v.GetStringMap("create-account-profile")
if profileMap == nil {
@@ -219,6 +219,28 @@ func MakeMainCommand[T field.Configurable](
opts = append(opts,
connectorrunner.WithTicketingEnabled(),
connectorrunner.WithGetTicket(v.GetString("ticket-id")))
+ case len(v.GetStringSlice("sync-resources")) > 0:
+ opts = append(opts,
+ connectorrunner.WithTargetedSyncResourceIDs(v.GetStringSlice("sync-resources")),
+ connectorrunner.WithOnDemandSync(v.GetString("file")),
+ )
+ case v.GetBool("diff-syncs"):
+ opts = append(opts,
+ connectorrunner.WithDiffSyncs(
+ v.GetString("file"),
+ v.GetString("base-sync-id"),
+ v.GetString("applied-sync-id"),
+ ),
+ )
+ case v.GetBool("compact-syncs"):
+ opts = append(opts,
+ connectorrunner.WithSyncCompactor(
+ v.GetString("compact-output-path"),
+ v.GetStringSlice("compact-file-paths"),
+ v.GetStringSlice("compact-sync-ids"),
+ ),
+ )
+
default:
opts = append(opts, connectorrunner.WithOnDemandSync(v.GetString("file")))
}
@@ -401,6 +423,8 @@ func MakeGRPCServerCommand[T field.Configurable](
copts = append(copts, connector.WithTicketingEnabled())
case v.GetBool("get-ticket"):
copts = append(copts, connector.WithTicketingEnabled())
+ case len(v.GetStringSlice("sync-resources")) > 0:
+ copts = append(copts, connector.WithTargetedSyncResourceIDs(v.GetStringSlice("sync-resources")))
}
cw, err := connector.NewWrapper(runCtx, c, copts...)
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go
index 493788a4..e5232b3c 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/config/config.go
@@ -51,6 +51,12 @@ func DefineConfiguration[T field.Configurable](
// Ensure unique fields
uniqueFields := make(map[string]field.SchemaField)
for _, f := range confschema.Fields {
+ if s, ok := uniqueFields[f.FieldName]; ok {
+ if !(f.WasReExported || s.WasReExported) {
+ return nil, nil, fmt.Errorf("multiple fields with the same name: %s. If you want to use a default field in the SDK, use ExportAs on the connector schema field", f.FieldName)
+ }
+ }
+
uniqueFields[f.FieldName] = f
}
confschema.Fields = make([]field.SchemaField, 0, len(uniqueFields))
@@ -66,14 +72,13 @@ func DefineConfiguration[T field.Configurable](
SilenceUsage: true,
RunE: cli.MakeMainCommand(ctx, connectorName, v, confschema, connector, options...),
}
+
+ relationships := []field.SchemaFieldRelationship{}
// set persistent flags only on the main subcommand
- err = cli.SetFlagsAndConstraints(mainCMD, field.NewConfiguration(field.DefaultFields, field.DefaultRelationships...))
- if err != nil {
- return nil, nil, err
- }
+ relationships = append(relationships, field.DefaultRelationships...)
+ relationships = append(relationships, confschema.Constraints...)
- // set the rest of flags
- err = cli.SetFlagsAndConstraints(mainCMD, schema)
+ err = cli.SetFlagsAndConstraints(mainCMD, field.NewConfiguration(confschema.Fields, relationships...))
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go
index b454c808..80d86e0b 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorbuilder/connectorbuilder.go
@@ -22,6 +22,7 @@ import (
"github.com/conductorone/baton-sdk/pkg/crypto"
"github.com/conductorone/baton-sdk/pkg/metrics"
"github.com/conductorone/baton-sdk/pkg/pagination"
+ "github.com/conductorone/baton-sdk/pkg/retry"
"github.com/conductorone/baton-sdk/pkg/types"
"github.com/conductorone/baton-sdk/pkg/types/tasks"
"github.com/conductorone/baton-sdk/pkg/uhttp"
@@ -41,6 +42,7 @@ var tracer = otel.Tracer("baton-sdk/pkg.connectorbuilder")
// - ResourceDeleter: For deleting resources
// - AccountManager: For account provisioning operations
// - CredentialManager: For credential rotation operations.
+// - ResourceTargetedSyncer: For directly fetching an individual resource (targeted sync).
type ResourceSyncer interface {
ResourceType(ctx context.Context) *v2.ResourceType
List(ctx context.Context, parentResourceID *v2.ResourceId, pToken *pagination.Token) ([]*v2.Resource, string, annotations.Annotations, error)
@@ -94,6 +96,15 @@ type ResourceDeleter interface {
Delete(ctx context.Context, resourceId *v2.ResourceId) (annotations.Annotations, error)
}
+// ResourceTargetedSyncer extends ResourceSyncer to add capabilities for directly syncing an individual resource.
+//
+// Implementing this interface indicates the connector supports calling "get" on a resource
+// of the associated resource type.
+type ResourceTargetedSyncer interface {
+ ResourceSyncer
+ Get(ctx context.Context, resourceId *v2.ResourceId, parentResourceId *v2.ResourceId) (*v2.Resource, annotations.Annotations, error)
+}
+
// CreateAccountResponse is a semi-opaque type returned from CreateAccount operations.
//
// This is used to communicate the result of account creation back to Baton.
@@ -124,13 +135,57 @@ type CredentialManager interface {
RotateCapabilityDetails(ctx context.Context) (*v2.CredentialDetailsCredentialRotation, annotations.Annotations, error)
}
-// EventProvider extends ConnectorBuilder to add capabilities for providing event streams.
-//
-// Implementing this interface indicates the connector can provide a stream of events
-// from the external system, enabling near real-time updates in Baton.
+// EventLister is a compatibility interface that lets us handle both EventFeed and EventProvider the same way.
+type EventLister interface {
+ ListEvents(ctx context.Context, earliestEvent *timestamppb.Timestamp, pToken *pagination.StreamToken) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error)
+}
+
+// Deprecated: This interface is deprecated in favor of EventProviderV2 which supports
+// multiple event feeds. Implementing this interface indicates the connector can provide
+// a single stream of events from the external system, enabling near real-time updates
+// in Baton. New connectors should implement EventProviderV2 instead.
type EventProvider interface {
ConnectorBuilder
- ListEvents(ctx context.Context, earliestEvent *timestamppb.Timestamp, pToken *pagination.StreamToken) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error)
+ EventLister
+}
+
+// EventProviderV2 is an interface that allows connectors to provide multiple event feeds.
+//
+// This is the recommended interface for implementing event feed support in new connectors.
+type EventProviderV2 interface {
+ ConnectorBuilder
+ EventFeeds(ctx context.Context) []EventFeed
+}
+
+// EventFeed is a single stream of events from the external system.
+//
+// EventFeedMetadata describes this feed, and a connector can have multiple feeds.
+type EventFeed interface {
+ EventLister
+ EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata
+}
+
+type oldEventFeedWrapper struct {
+ feed EventLister
+}
+
+const (
+ LegacyBatonFeedId = "baton_feed_event"
+)
+
+func (e *oldEventFeedWrapper) EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata {
+ return &v2.EventFeedMetadata{
+ Id: LegacyBatonFeedId,
+ SupportedEventTypes: []v2.EventType{v2.EventType_EVENT_TYPE_UNSPECIFIED},
+ }
+}
+
+func (e *oldEventFeedWrapper) ListEvents(
+ ctx context.Context,
+ earliestEvent *timestamppb.Timestamp,
+ pToken *pagination.StreamToken,
+) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) {
+ return e.feed.ListEvents(ctx, earliestEvent, pToken)
}
// TicketManager extends ConnectorBuilder to add capabilities for ticket management.
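
For orientation, here is a minimal sketch of a feed that would satisfy the EventFeed contract introduced above; the auditFeed type, its "audit_log" feed ID, and the empty event page are illustrative assumptions rather than anything defined in this patch. A connector opting into EventProviderV2 would return such feeds from its EventFeeds(ctx) method.

package example

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/annotations"
	"github.com/conductorone/baton-sdk/pkg/pagination"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// auditFeed is a hypothetical feed exposing audit-log events.
type auditFeed struct{}

// EventFeedMetadata identifies the feed; ListEvents selects feeds by this ID.
func (f *auditFeed) EventFeedMetadata(ctx context.Context) *v2.EventFeedMetadata {
	return &v2.EventFeedMetadata{
		Id:                  "audit_log",
		SupportedEventTypes: []v2.EventType{v2.EventType_EVENT_TYPE_UNSPECIFIED},
	}
}

// ListEvents returns one page of events. This stub returns an empty page and
// an empty stream state; a real feed would page through the upstream API.
func (f *auditFeed) ListEvents(
	ctx context.Context,
	earliestEvent *timestamppb.Timestamp,
	pToken *pagination.StreamToken,
) ([]*v2.Event, *pagination.StreamState, annotations.Annotations, error) {
	return []*v2.Event{}, &pagination.StreamState{}, nil, nil
}

// A connector implementing EventProviderV2 would then return
// []connectorbuilder.EventFeed{&auditFeed{}} from its EventFeeds(ctx) method.
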
@@ -184,20 +239,21 @@ type ConnectorBuilder interface {
}
type builderImpl struct {
- resourceBuilders map[string]ResourceSyncer
- resourceProvisioners map[string]ResourceProvisioner
- resourceProvisionersV2 map[string]ResourceProvisionerV2
- resourceManagers map[string]ResourceManager
- resourceDeleters map[string]ResourceDeleter
- accountManager AccountManager
- actionManager CustomActionManager
- credentialManagers map[string]CredentialManager
- eventFeed EventProvider
- cb ConnectorBuilder
- ticketManager TicketManager
- ticketingEnabled bool
- m *metrics.M
- nowFunc func() time.Time
+ resourceBuilders map[string]ResourceSyncer
+ resourceProvisioners map[string]ResourceProvisioner
+ resourceProvisionersV2 map[string]ResourceProvisionerV2
+ resourceManagers map[string]ResourceManager
+ resourceDeleters map[string]ResourceDeleter
+ resourceTargetedSyncers map[string]ResourceTargetedSyncer
+ accountManager AccountManager
+ actionManager CustomActionManager
+ credentialManagers map[string]CredentialManager
+ eventFeeds map[string]EventFeed
+ cb ConnectorBuilder
+ ticketManager TicketManager
+ ticketingEnabled bool
+ m *metrics.M
+ nowFunc func() time.Time
}
func (b *builderImpl) BulkCreateTickets(ctx context.Context, request *v2.TicketsServiceBulkCreateTicketsRequest) (*v2.TicketsServiceBulkCreateTicketsResponse, error) {
@@ -395,17 +451,19 @@ func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.Conne
switch c := in.(type) {
case ConnectorBuilder:
ret := &builderImpl{
- resourceBuilders: make(map[string]ResourceSyncer),
- resourceProvisioners: make(map[string]ResourceProvisioner),
- resourceProvisionersV2: make(map[string]ResourceProvisionerV2),
- resourceManagers: make(map[string]ResourceManager),
- resourceDeleters: make(map[string]ResourceDeleter),
- accountManager: nil,
- actionManager: nil,
- credentialManagers: make(map[string]CredentialManager),
- cb: c,
- ticketManager: nil,
- nowFunc: time.Now,
+ resourceBuilders: make(map[string]ResourceSyncer),
+ resourceProvisioners: make(map[string]ResourceProvisioner),
+ resourceProvisionersV2: make(map[string]ResourceProvisionerV2),
+ resourceManagers: make(map[string]ResourceManager),
+ resourceDeleters: make(map[string]ResourceDeleter),
+ resourceTargetedSyncers: make(map[string]ResourceTargetedSyncer),
+ accountManager: nil,
+ actionManager: nil,
+ credentialManagers: make(map[string]CredentialManager),
+ eventFeeds: make(map[string]EventFeed),
+ cb: c,
+ ticketManager: nil,
+ nowFunc: time.Now,
}
err := ret.options(opts...)
@@ -417,8 +475,31 @@ func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.Conne
ret.m = metrics.New(metrics.NewNoOpHandler(ctx))
}
+ if b, ok := c.(EventProviderV2); ok {
+ for _, ef := range b.EventFeeds(ctx) {
+ feedData := ef.EventFeedMetadata(ctx)
+ if feedData == nil {
+ return nil, fmt.Errorf("error: event feed metadata is nil")
+ }
+ if err := feedData.Validate(); err != nil {
+ return nil, fmt.Errorf("error: event feed metadata for %s is invalid: %w", feedData.Id, err)
+ }
+ if _, ok := ret.eventFeeds[feedData.Id]; ok {
+ return nil, fmt.Errorf("error: duplicate event feed id found: %s", feedData.Id)
+ }
+ ret.eventFeeds[feedData.Id] = ef
+ }
+ }
+
if b, ok := c.(EventProvider); ok {
- ret.eventFeed = b
+ // Register the legacy Baton feed as a v2 event feed.
+ // Implementing both v1 and v2 event feeds is not supported.
+ if len(ret.eventFeeds) != 0 {
+ return nil, fmt.Errorf("error: using legacy event feed is not supported when using EventProviderV2")
+ }
+ ret.eventFeeds[LegacyBatonFeedId] = &oldEventFeedWrapper{
+ feed: b,
+ }
}
if ticketManager, ok := c.(TicketManager); ok {
@@ -472,6 +553,12 @@ func NewConnector(ctx context.Context, in interface{}, opts ...Opt) (types.Conne
}
ret.resourceProvisionersV2[rType.Id] = provisioner
}
+ if targetedSyncer, ok := rb.(ResourceTargetedSyncer); ok {
+ if _, ok := ret.resourceTargetedSyncers[rType.Id]; ok {
+ return nil, fmt.Errorf("error: duplicate resource type found for resource targeted syncer %s", rType.Id)
+ }
+ ret.resourceTargetedSyncers[rType.Id] = targetedSyncer
+ }
if resourceManager, ok := rb.(ResourceManager); ok {
if _, ok := ret.resourceManagers[rType.Id]; ok {
@@ -568,10 +655,20 @@ func (b *builderImpl) ListResourceTypes(
tt := tasks.ListResourceTypesType
var out []*v2.ResourceType
+ if len(b.resourceBuilders) == 0 {
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: no resource builders found")
+ }
+
for _, rb := range b.resourceBuilders {
out = append(out, rb.ResourceType(ctx))
}
+ if len(out) == 0 {
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: no resource types found")
+ }
+
b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
return &v2.ResourceTypesServiceListResourceTypesResponse{List: out}, nil
}
@@ -611,6 +708,36 @@ func (b *builderImpl) ListResources(ctx context.Context, request *v2.ResourcesSe
return resp, nil
}
+func (b *builderImpl) GetResource(ctx context.Context, request *v2.ResourceGetterServiceGetResourceRequest) (*v2.ResourceGetterServiceGetResourceResponse, error) {
+ ctx, span := tracer.Start(ctx, "builderImpl.GetResource")
+ defer span.End()
+
+ start := b.nowFunc()
+ tt := tasks.GetResourceType
+ resourceType := request.GetResourceId().GetResourceType()
+ rb, ok := b.resourceTargetedSyncers[resourceType]
+ if !ok {
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, status.Errorf(codes.Unimplemented, "error: get resource with unknown resource type %s", resourceType)
+ }
+
+ resource, annos, err := rb.Get(ctx, request.GetResourceId(), request.GetParentResourceId())
+ if err != nil {
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: get resource failed: %w", err)
+ }
+ if resource == nil {
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, status.Error(codes.NotFound, "error: get resource returned nil")
+ }
+
+ b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
+ return &v2.ResourceGetterServiceGetResourceResponse{
+ Resource: resource,
+ Annotations: annos,
+ }, nil
+}
+
// ListEntitlements returns all the entitlements for a given resource.
func (b *builderImpl) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) {
ctx, span := tracer.Start(ctx, "builderImpl.ListEntitlements")
@@ -779,6 +906,10 @@ func getCapabilities(ctx context.Context, b *builderImpl) (*v2.ConnectorCapabili
Capabilities: []v2.Capability{v2.Capability_CAPABILITY_SYNC},
}
connectorCaps[v2.Capability_CAPABILITY_SYNC] = struct{}{}
+ if _, ok := rb.(ResourceTargetedSyncer); ok {
+ resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_TARGETED_SYNC)
+ connectorCaps[v2.Capability_CAPABILITY_TARGETED_SYNC] = struct{}{}
+ }
if _, ok := rb.(ResourceProvisioner); ok {
resourceTypeCapability.Capabilities = append(resourceTypeCapability.Capabilities, v2.Capability_CAPABILITY_PROVISION)
connectorCaps[v2.Capability_CAPABILITY_PROVISION] = struct{}{}
@@ -811,8 +942,8 @@ func getCapabilities(ctx context.Context, b *builderImpl) (*v2.ConnectorCapabili
return resourceTypeCapabilities[i].ResourceType.GetId() < resourceTypeCapabilities[j].ResourceType.GetId()
})
- if b.eventFeed != nil {
- connectorCaps[v2.Capability_CAPABILITY_EVENT_FEED] = struct{}{}
+ if len(b.eventFeeds) > 0 {
+ connectorCaps[v2.Capability_CAPABILITY_EVENT_FEED_V2] = struct{}{}
}
if b.ticketManager != nil {
@@ -863,35 +994,48 @@ func (b *builderImpl) Grant(ctx context.Context, request *v2.GrantManagerService
l := ctxzap.Extract(ctx)
rt := request.Entitlement.Resource.Id.ResourceType
+
+ retryer := retry.NewRetryer(ctx, retry.RetryConfig{
+ MaxAttempts: 3,
+ InitialDelay: 15 * time.Second,
+ MaxDelay: 60 * time.Second,
+ })
+
+ var grantFunc func(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error)
provisioner, ok := b.resourceProvisioners[rt]
if ok {
- annos, err := provisioner.Grant(ctx, request.Principal, request.Entitlement)
- if err != nil {
- l.Error("error: grant failed", zap.Error(err))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: grant failed: %w", err)
+ grantFunc = func(ctx context.Context, principal *v2.Resource, entitlement *v2.Entitlement) ([]*v2.Grant, annotations.Annotations, error) {
+ annos, err := provisioner.Grant(ctx, principal, entitlement)
+ if err != nil {
+ return nil, annos, err
+ }
+ return nil, annos, nil
}
-
- b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
- return &v2.GrantManagerServiceGrantResponse{Annotations: annos}, nil
}
-
provisionerV2, ok := b.resourceProvisionersV2[rt]
if ok {
- grants, annos, err := provisionerV2.Grant(ctx, request.Principal, request.Entitlement)
- if err != nil {
- l.Error("error: grant failed", zap.Error(err))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: grant failed: %w", err)
- }
+ grantFunc = provisionerV2.Grant
+ }
- b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
- return &v2.GrantManagerServiceGrantResponse{Annotations: annos, Grants: grants}, nil
+ if grantFunc == nil {
+ l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt))
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: resource type does not have provisioner configured")
}
- l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: resource type does not have provisioner configured")
+ for {
+ grants, annos, err := grantFunc(ctx, request.Principal, request.Entitlement)
+ if err == nil {
+ b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
+ return &v2.GrantManagerServiceGrantResponse{Annotations: annos, Grants: grants}, nil
+ }
+ if retryer.ShouldWaitAndRetry(ctx, err) {
+ continue
+ }
+ l.Error("error: grant failed", zap.Error(err))
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("err: grant failed: %w", err)
+ }
}
func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServiceRevokeRequest) (*v2.GrantManagerServiceRevokeResponse, error) {
@@ -904,33 +1048,42 @@ func (b *builderImpl) Revoke(ctx context.Context, request *v2.GrantManagerServic
l := ctxzap.Extract(ctx)
rt := request.Grant.Entitlement.Resource.Id.ResourceType
+
+ retryer := retry.NewRetryer(ctx, retry.RetryConfig{
+ MaxAttempts: 3,
+ InitialDelay: 15 * time.Second,
+ MaxDelay: 60 * time.Second,
+ })
+
+ var revokeFunc func(ctx context.Context, grant *v2.Grant) (annotations.Annotations, error)
provisioner, ok := b.resourceProvisioners[rt]
if ok {
- annos, err := provisioner.Revoke(ctx, request.Grant)
- if err != nil {
- l.Error("error: revoke failed", zap.Error(err))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: revoke failed: %w", err)
- }
- return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil
+ revokeFunc = provisioner.Revoke
}
-
provisionerV2, ok := b.resourceProvisionersV2[rt]
if ok {
- annos, err := provisionerV2.Revoke(ctx, request.Grant)
- if err != nil {
- l.Error("error: revoke failed", zap.Error(err))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: revoke failed: %w", err)
- }
+ revokeFunc = provisionerV2.Revoke
+ }
- b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
- return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil
+ if revokeFunc == nil {
+ l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt))
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: resource type does not have provisioner configured")
}
- l.Error("error: resource type does not have provisioner configured", zap.String("resource_type", rt))
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, status.Error(codes.Unimplemented, "resource type does not have provisioner configured")
+ for {
+ annos, err := revokeFunc(ctx, request.Grant)
+ if err == nil {
+ b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
+ return &v2.GrantManagerServiceRevokeResponse{Annotations: annos}, nil
+ }
+ if retryer.ShouldWaitAndRetry(ctx, err) {
+ continue
+ }
+ l.Error("error: revoke failed", zap.Error(err))
+ b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
+ return nil, fmt.Errorf("error: revoke failed: %w", err)
+ }
}
// GetAsset streams the asset to the client.
@@ -942,17 +1095,44 @@ func (b *builderImpl) GetAsset(request *v2.AssetServiceGetAssetRequest, server v
return nil
}
+func (b *builderImpl) ListEventFeeds(ctx context.Context, request *v2.ListEventFeedsRequest) (*v2.ListEventFeedsResponse, error) {
+ ctx, span := tracer.Start(ctx, "builderImpl.ListEventFeeds")
+ defer span.End()
+
+ start := b.nowFunc()
+ tt := tasks.ListEventFeedsType
+
+ feeds := make([]*v2.EventFeedMetadata, 0, len(b.eventFeeds))
+
+ for _, feed := range b.eventFeeds {
+ feeds = append(feeds, feed.EventFeedMetadata(ctx))
+ }
+
+ b.m.RecordTaskSuccess(ctx, tt, b.nowFunc().Sub(start))
+ return &v2.ListEventFeedsResponse{
+ List: feeds,
+ }, nil
+}
+
func (b *builderImpl) ListEvents(ctx context.Context, request *v2.ListEventsRequest) (*v2.ListEventsResponse, error) {
ctx, span := tracer.Start(ctx, "builderImpl.ListEvents")
defer span.End()
start := b.nowFunc()
- tt := tasks.ListEventsType
- if b.eventFeed == nil {
- b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start))
- return nil, fmt.Errorf("error: event feed not implemented")
+ feedId := request.GetEventFeedId()
+
+ // If no feedId is provided, fall back to the legacy Baton feed ID.
+ if feedId == "" {
+ feedId = LegacyBatonFeedId
}
- events, streamState, annotations, err := b.eventFeed.ListEvents(ctx, request.StartAt, &pagination.StreamToken{
+
+ feed, ok := b.eventFeeds[feedId]
+ if !ok {
+ return nil, status.Errorf(codes.NotFound, "error: event feed not found: %s", feedId)
+ }
+
+ tt := tasks.ListEventsType
+ events, streamState, annotations, err := feed.ListEvents(ctx, request.StartAt, &pagination.StreamToken{
Size: int(request.PageSize),
Cursor: request.Cursor,
})
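
To ground the ResourceTargetedSyncer wiring above, the following sketch shows the shape of a Get implementation on a hypothetical user syncer; the userBuilder type and its stubbed lookup are assumptions, and a real connector would fetch the resource from its upstream API.

package example

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/annotations"
)

// userBuilder is a hypothetical syncer; its ResourceType, List, Entitlements,
// and Grants methods are assumed to be implemented elsewhere.
type userBuilder struct{}

// Get satisfies ResourceTargetedSyncer by resolving a single resource by ID.
// This stub simply echoes the requested ID back as a resource.
func (u *userBuilder) Get(ctx context.Context, resourceId *v2.ResourceId, parentResourceId *v2.ResourceId) (*v2.Resource, annotations.Annotations, error) {
	return &v2.Resource{
		Id:               resourceId,
		DisplayName:      resourceId.GetResource(),
		ParentResourceId: parentResourceId,
	}, nil, nil
}
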
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go
index 2b1aba42..d6468d82 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorrunner/runner.go
@@ -10,6 +10,7 @@ import (
"strings"
"time"
+ "github.com/conductorone/baton-sdk/pkg/synccompactor"
"golang.org/x/sync/semaphore"
"google.golang.org/protobuf/types/known/structpb"
@@ -44,20 +45,36 @@ var ErrSigTerm = errors.New("context cancelled by process shutdown")
// Run starts a connector and creates a new C1Z file.
func (c *connectorRunner) Run(ctx context.Context) error {
+ l := ctxzap.Extract(ctx)
ctx, cancel := context.WithCancelCause(ctx)
defer cancel(ErrSigTerm)
if c.tasks.ShouldDebug() && c.debugFile == nil {
var err error
- c.debugFile, err = os.Create(filepath.Join(c.tasks.GetTempDir(), "debug.log"))
+ tempDir := c.tasks.GetTempDir()
+ if tempDir == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ l.Warn("unable to get the current working directory", zap.Error(err))
+ }
+
+ if wd != "" {
+ l.Warn("no temporal folder found on this system according to our task manager,"+
+ " we may create files in the current working directory by mistake as a result",
+ zap.String("current working directory", wd))
+ } else {
+ l.Warn("no temporal folder found on this system according to our task manager")
+ }
+ }
+ debugFile := filepath.Join(tempDir, "debug.log")
+ c.debugFile, err = os.Create(debugFile)
if err != nil {
- return err
+ l.Warn("cannot create file", zap.String("full file path", debugFile), zap.Error(err))
}
}
// modify the context to insert a logger directed to a file
if c.debugFile != nil {
- l := ctxzap.Extract(ctx)
writeSyncer := zapcore.AddSync(c.debugFile)
encoder := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
core := zapcore.NewCore(encoder, writeSyncer, zapcore.DebugLevel)
@@ -101,6 +118,7 @@ func (c *connectorRunner) handleContextCancel(ctx context.Context) error {
l.Debug("runner: unexpected context cancellation", zap.Error(err))
return err
}
+
func (c *connectorRunner) processTask(ctx context.Context, task *v1.Task) error {
cc, err := c.cw.C(ctx)
if err != nil {
@@ -280,6 +298,19 @@ type rotateCredentialsConfig struct {
}
type eventStreamConfig struct {
+ feedId string
+ startAt time.Time
+}
+
+type syncDifferConfig struct {
+ baseSyncID string
+ appliedSyncID string
+}
+
+type syncCompactorConfig struct {
+ filePaths []string
+ syncIDs []string
+ outputPath string
}
type runnerConfig struct {
@@ -303,7 +334,10 @@ type runnerConfig struct {
bulkCreateTicketConfig *bulkCreateTicketConfig
listTicketSchemasConfig *listTicketSchemasConfig
getTicketConfig *getTicketConfig
+ syncDifferConfig *syncDifferConfig
+ syncCompactorConfig *syncCompactorConfig
skipFullSync bool
+ targetedSyncResourceIDs []string
externalResourceC1Z string
externalResourceEntitlementIdFilter string
}
@@ -395,6 +429,7 @@ func WithOnDemandGrant(c1zPath string, entitlementID string, principalID string,
return nil
}
}
+
func WithClientCredentials(clientID string, clientSecret string) Option {
return func(ctx context.Context, cfg *runnerConfig) error {
cfg.clientID = clientID
@@ -459,10 +494,14 @@ func WithOnDemandSync(c1zPath string) Option {
return nil
}
}
-func WithOnDemandEventStream() Option {
+
+func WithOnDemandEventStream(feedId string, startAt time.Time) Option {
return func(ctx context.Context, cfg *runnerConfig) error {
cfg.onDemand = true
- cfg.eventFeedConfig = &eventStreamConfig{}
+ cfg.eventFeedConfig = &eventStreamConfig{
+ feedId: feedId,
+ startAt: startAt,
+ }
return nil
}
}
@@ -481,6 +520,13 @@ func WithFullSyncDisabled() Option {
}
}
+func WithTargetedSyncResourceIDs(resourceIDs []string) Option {
+ return func(ctx context.Context, cfg *runnerConfig) error {
+ cfg.targetedSyncResourceIDs = resourceIDs
+ return nil
+ }
+}
+
func WithTicketingEnabled() Option {
return func(ctx context.Context, cfg *runnerConfig) error {
cfg.ticketingEnabled = true
@@ -547,6 +593,32 @@ func WithExternalResourceEntitlementFilter(entitlementId string) Option {
}
}
+func WithDiffSyncs(c1zPath string, baseSyncID string, newSyncID string) Option {
+ return func(ctx context.Context, cfg *runnerConfig) error {
+ cfg.onDemand = true
+ cfg.c1zPath = c1zPath
+ cfg.syncDifferConfig = &syncDifferConfig{
+ baseSyncID: baseSyncID,
+ appliedSyncID: newSyncID,
+ }
+ return nil
+ }
+}
+
+func WithSyncCompactor(outputPath string, filePaths []string, syncIDs []string) Option {
+ return func(ctx context.Context, cfg *runnerConfig) error {
+ cfg.onDemand = true
+ cfg.c1zPath = "dummy"
+
+ cfg.syncCompactorConfig = &syncCompactorConfig{
+ filePaths: filePaths,
+ syncIDs: syncIDs,
+ outputPath: outputPath,
+ }
+ return nil
+ }
+}
+
// NewConnectorRunner creates a new connector runner.
func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Option) (*connectorRunner, error) {
runner := &connectorRunner{}
@@ -578,6 +650,10 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op
wrapperOpts = append(wrapperOpts, connector.WithFullSyncDisabled())
}
+ if len(cfg.targetedSyncResourceIDs) > 0 {
+ wrapperOpts = append(wrapperOpts, connector.WithTargetedSyncResourceIDs(cfg.targetedSyncResourceIDs))
+ }
+
cw, err := connector.NewWrapper(ctx, c, wrapperOpts...)
if err != nil {
return nil, err
@@ -614,7 +690,7 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op
tm = local.NewCredentialRotator(ctx, cfg.c1zPath, cfg.rotateCredentialsConfig.resourceId, cfg.rotateCredentialsConfig.resourceType)
case cfg.eventFeedConfig != nil:
- tm = local.NewEventFeed(ctx)
+ tm = local.NewEventFeed(ctx, cfg.eventFeedConfig.feedId, cfg.eventFeedConfig.startAt)
case cfg.createTicketConfig != nil:
tm = local.NewTicket(ctx, cfg.createTicketConfig.templatePath)
case cfg.listTicketSchemasConfig != nil:
@@ -623,11 +699,28 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op
tm = local.NewGetTicket(ctx, cfg.getTicketConfig.ticketID)
case cfg.bulkCreateTicketConfig != nil:
tm = local.NewBulkTicket(ctx, cfg.bulkCreateTicketConfig.templatePath)
+ case cfg.syncDifferConfig != nil:
+ tm = local.NewDiffer(ctx, cfg.c1zPath, cfg.syncDifferConfig.baseSyncID, cfg.syncDifferConfig.appliedSyncID)
+ case cfg.syncCompactorConfig != nil:
+ c := cfg.syncCompactorConfig
+ if len(c.filePaths) != len(c.syncIDs) {
+ return nil, errors.New("sync-compactor: must include exactly one syncID per file")
+ }
+ configs := make([]*synccompactor.CompactableSync, 0, len(c.filePaths))
+ for i, filePath := range c.filePaths {
+ configs = append(configs, &synccompactor.CompactableSync{
+ FilePath: filePath,
+ SyncID: c.syncIDs[i],
+ })
+ }
+ tm = local.NewLocalCompactor(ctx, cfg.syncCompactorConfig.outputPath, configs)
default:
tm, err = local.NewSyncer(ctx, cfg.c1zPath,
local.WithTmpDir(cfg.tempDir),
local.WithExternalResourceC1Z(cfg.externalResourceC1Z),
- local.WithExternalResourceEntitlementIdFilter(cfg.externalResourceEntitlementIdFilter))
+ local.WithExternalResourceEntitlementIdFilter(cfg.externalResourceEntitlementIdFilter),
+ local.WithTargetedSyncResourceIDs(cfg.targetedSyncResourceIDs),
+ )
if err != nil {
return nil, err
}
@@ -639,7 +732,7 @@ func NewConnectorRunner(ctx context.Context, c types.ConnectorServer, opts ...Op
return runner, nil
}
- tm, err := c1api.NewC1TaskManager(ctx, cfg.clientID, cfg.clientSecret, cfg.tempDir, cfg.skipFullSync, cfg.externalResourceC1Z, cfg.externalResourceEntitlementIdFilter)
+ tm, err := c1api.NewC1TaskManager(ctx, cfg.clientID, cfg.clientSecret, cfg.tempDir, cfg.skipFullSync, cfg.externalResourceC1Z, cfg.externalResourceEntitlementIdFilter, cfg.targetedSyncResourceIDs)
if err != nil {
return nil, err
}
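
The new options compose with the existing runner API; below is a rough sketch of driving the compactor and a targeted on-demand sync through NewConnectorRunner. Here server stands in for an already-constructed types.ConnectorServer, and pairing WithOnDemandSync with WithTargetedSyncResourceIDs is an assumption for illustration.

package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/connectorrunner"
	"github.com/conductorone/baton-sdk/pkg/types"
)

// runCompaction compacts the given C1Z files into outDir, pairing each file
// with its sync ID (NewConnectorRunner rejects mismatched lengths).
func runCompaction(ctx context.Context, server types.ConnectorServer, outDir string, files, syncIDs []string) error {
	r, err := connectorrunner.NewConnectorRunner(ctx, server,
		connectorrunner.WithSyncCompactor(outDir, files, syncIDs),
	)
	if err != nil {
		return err
	}
	return r.Run(ctx)
}

// runTargetedSync writes an on-demand sync of only the listed resource IDs
// into a local C1Z file.
func runTargetedSync(ctx context.Context, server types.ConnectorServer, c1zPath string, resourceIDs []string) error {
	r, err := connectorrunner.NewConnectorRunner(ctx, server,
		connectorrunner.WithOnDemandSync(c1zPath),
		connectorrunner.WithTargetedSyncResourceIDs(resourceIDs),
	)
	if err != nil {
		return err
	}
	return r.Run(ctx)
}
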
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go
index 7ca90658..71db3b92 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/connectorstore/connectorstore.go
@@ -36,6 +36,8 @@ type Writer interface {
Reader
StartSync(ctx context.Context) (string, bool, error)
StartNewSync(ctx context.Context) (string, error)
+ StartNewSyncV2(ctx context.Context, syncType string, parentSyncID string) (string, error)
+ SetCurrentSync(ctx context.Context, syncID string) error
CurrentSyncStep(ctx context.Context) (string, error)
CheckpointSync(ctx context.Context, syncToken string) error
EndSync(ctx context.Context) error
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go
index 6fc9066f..035fbcf6 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/assets.go
@@ -49,6 +49,10 @@ func (r *assetsTable) Schema() (string, []interface{}) {
}
}
+func (r *assetsTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ return nil
+}
+
// PutAsset stores the given asset in the database.
func (c *C1File) PutAsset(ctx context.Context, assetRef *v2.AssetRef, contentType string, data []byte) error {
ctx, span := tracer.Start(ctx, "C1File.PutAsset")
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go
index 87fe0270..11bf0fc6 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/c1file.go
@@ -93,6 +93,11 @@ func NewC1File(ctx context.Context, dbFilePath string, opts ...C1FOption) (*C1Fi
return nil, err
}
+ err = c1File.init(ctx)
+ if err != nil {
+ return nil, err
+ }
+
return c1File, nil
}
@@ -141,11 +146,6 @@ func NewC1ZFile(ctx context.Context, outputFilePath string, opts ...C1ZOption) (
c1File.outputFilePath = outputFilePath
- err = c1File.init(ctx)
- if err != nil {
- return nil, err
- }
-
return c1File, nil
}
@@ -192,11 +192,14 @@ func (c *C1File) init(ctx context.Context) error {
for _, t := range allTableDescriptors {
query, args := t.Schema()
-
_, err = c.db.ExecContext(ctx, fmt.Sprintf(query, args...))
if err != nil {
return err
}
+ err = t.Migrations(ctx, c.db)
+ if err != nil {
+ return err
+ }
}
for _, pragma := range c.pragmas {
@@ -288,3 +291,10 @@ func (c *C1File) validateSyncDb(ctx context.Context) error {
return c.validateDb(ctx)
}
+
+func (c *C1File) OutputFilepath() (string, error) {
+ if c.outputFilePath == "" {
+ return "", fmt.Errorf("c1file: output file path is empty")
+ }
+ return c.outputFilePath, nil
+}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go
new file mode 100644
index 00000000..476b4ccc
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/diff.go
@@ -0,0 +1,113 @@
+package dotc1z
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/doug-martin/goqu/v9"
+ "github.com/segmentio/ksuid"
+)
+
+func (c *C1File) GenerateSyncDiff(ctx context.Context, baseSyncID string, appliedSyncID string) (string, error) {
+ // Validate that both sync runs exist
+ baseSync, err := c.getSync(ctx, baseSyncID)
+ if err != nil {
+ return "", err
+ }
+ if baseSync == nil {
+ return "", fmt.Errorf("generate-diff: base sync not found")
+ }
+
+ newSync, err := c.getSync(ctx, appliedSyncID)
+ if err != nil {
+ return "", err
+ }
+ if newSync == nil {
+ return "", fmt.Errorf("generate-diff: new sync not found")
+ }
+
+ // Generate a new unique ID for the diff sync
+ diffSyncID := ksuid.New().String()
+
+ if err := c.insertSyncRun(ctx, diffSyncID, SyncTypePartial, baseSyncID); err != nil {
+ return "", err
+ }
+
+ for _, t := range allTableDescriptors {
+ if strings.Contains(t.Name(), syncRunsTableName) {
+ continue
+ }
+
+ q, args, err := c.diffTableQuery(t, baseSyncID, appliedSyncID, diffSyncID)
+ if err != nil {
+ return "", err
+ }
+ _, err = c.db.ExecContext(ctx, q, args...)
+ if err != nil {
+ return "", err
+ }
+ c.dbUpdated = true
+ }
+
+ if err := c.endSyncRun(ctx, diffSyncID); err != nil {
+ return "", err
+ }
+
+ return diffSyncID, nil
+}
+
+func (c *C1File) diffTableQuery(table tableDescriptor, baseSyncID, appliedSyncID, newSyncID string) (string, []any, error) {
+ // Define the columns to select based on the table name
+ columns := []interface{}{
+ "external_id",
+ "data",
+ "sync_id",
+ "discovered_at",
+ }
+
+ tableName := table.Name()
+ // Add table-specific columns
+ switch {
+ case strings.Contains(tableName, resourcesTableName):
+ columns = append(columns, "resource_type_id", "parent_resource_type_id", "parent_resource_id")
+ case strings.Contains(tableName, resourceTypesTableName):
+ // Nothing new to add here
+ case strings.Contains(tableName, grantsTableName):
+ columns = append(columns, "resource_type_id", "resource_id", "entitlement_id", "principal_resource_type_id", "principal_resource_id")
+ case strings.Contains(tableName, entitlementsTableName):
+ columns = append(columns, "resource_type_id", "resource_id")
+ case strings.Contains(tableName, assetsTableName):
+ columns = append(columns, "content_type")
+ }
+
+ // Build the subquery to find external_ids in the base sync
+ subquery := c.db.Select("external_id").
+ From(tableName).
+ Where(goqu.C("sync_id").Eq(baseSyncID))
+
+ queryColumns := []interface{}{}
+ for _, col := range columns {
+ if col == "sync_id" {
+ queryColumns = append(queryColumns, goqu.L(fmt.Sprintf("'%s' as sync_id", newSyncID)))
+ continue
+ }
+ queryColumns = append(queryColumns, col)
+ }
+
+ // Build the main query to select records from newSyncID that don't exist in baseSyncID
+ query := c.db.Insert(tableName).
+ Cols(columns...).
+ Prepared(true).
+ FromQuery(
+ c.db.Select(queryColumns...).
+ From(tableName).
+ Where(
+ goqu.C("sync_id").Eq(appliedSyncID),
+ goqu.C("external_id").NotIn(subquery),
+ ),
+ )
+
+ // Generate the SQL and args
+ return query.ToSQL()
+}
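
Outside of the runner, the differ is just a method on an opened C1Z; a minimal sketch, assuming dotc1z.NewC1ZFile is the right way to open the existing file and leaving save/close handling out.

package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/dotc1z"
)

// diffSyncs records a new partial sync containing everything present in
// appliedSyncID but absent from baseSyncID, and returns the new sync's ID.
// Saving/closing the file is intentionally omitted from this sketch.
func diffSyncs(ctx context.Context, c1zPath, baseSyncID, appliedSyncID string) (string, error) {
	f, err := dotc1z.NewC1ZFile(ctx, c1zPath)
	if err != nil {
		return "", err
	}
	return f.GenerateSyncDiff(ctx, baseSyncID, appliedSyncID)
}
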
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go
index 55a8a408..9456ae77 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/entitlements.go
@@ -49,6 +49,10 @@ func (r *entitlementsTable) Schema() (string, []interface{}) {
}
}
+func (r *entitlementsTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ return nil
+}
+
func (c *C1File) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) {
ctx, span := tracer.Start(ctx, "C1File.ListEntitlements")
defer span.End()
@@ -97,7 +101,20 @@ func (c *C1File) PutEntitlements(ctx context.Context, entitlementObjs ...*v2.Ent
ctx, span := tracer.Start(ctx, "C1File.PutEntitlements")
defer span.End()
- err := bulkPutConnectorObject(ctx, c, entitlements.Name(),
+ return c.putEntitlementsInternal(ctx, bulkPutConnectorObject, entitlementObjs...)
+}
+
+func (c *C1File) PutEntitlementsIfNewer(ctx context.Context, entitlementObjs ...*v2.Entitlement) error {
+ ctx, span := tracer.Start(ctx, "C1File.PutEntitlementsIfNewer")
+ defer span.End()
+
+ return c.putEntitlementsInternal(ctx, bulkPutConnectorObjectIfNewer, entitlementObjs...)
+}
+
+type entitlementPutFunc func(context.Context, *C1File, string, func(m *v2.Entitlement) (goqu.Record, error), ...*v2.Entitlement) error
+
+func (c *C1File) putEntitlementsInternal(ctx context.Context, f entitlementPutFunc, entitlementObjs ...*v2.Entitlement) error {
+ err := f(ctx, c, entitlements.Name(),
func(entitlement *v2.Entitlement) (goqu.Record, error) {
return goqu.Record{
"resource_id": entitlement.Resource.Id.Resource,
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go
index 972dfe24..4edc283d 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/grants.go
@@ -58,6 +58,10 @@ func (r *grantsTable) Schema() (string, []interface{}) {
}
}
+func (r *grantsTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ return nil
+}
+
func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) {
ctx, span := tracer.Start(ctx, "C1File.ListGrants")
defer span.End()
@@ -190,7 +194,20 @@ func (c *C1File) PutGrants(ctx context.Context, bulkGrants ...*v2.Grant) error {
ctx, span := tracer.Start(ctx, "C1File.PutGrants")
defer span.End()
- err := bulkPutConnectorObject(ctx, c, grants.Name(),
+ return c.putGrantsInternal(ctx, bulkPutConnectorObject, bulkGrants...)
+}
+
+func (c *C1File) PutGrantsIfNewer(ctx context.Context, bulkGrants ...*v2.Grant) error {
+ ctx, span := tracer.Start(ctx, "C1File.PutGrantsIfNewer")
+ defer span.End()
+
+ return c.putGrantsInternal(ctx, bulkPutConnectorObjectIfNewer, bulkGrants...)
+}
+
+type grantPutFunc func(context.Context, *C1File, string, func(m *v2.Grant) (goqu.Record, error), ...*v2.Grant) error
+
+func (c *C1File) putGrantsInternal(ctx context.Context, f grantPutFunc, bulkGrants ...*v2.Grant) error {
+ err := f(ctx, c, grants.Name(),
func(grant *v2.Grant) (goqu.Record, error) {
return goqu.Record{
"resource_type_id": grant.Entitlement.Resource.Id.ResourceType,
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go
index 9197e751..53a7f64e 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resouce_types.go
@@ -45,6 +45,10 @@ func (r *resourceTypesTable) Schema() (string, []interface{}) {
}
}
+func (r *resourceTypesTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ return nil
+}
+
func (c *C1File) ListResourceTypes(ctx context.Context, request *v2.ResourceTypesServiceListResourceTypesRequest) (*v2.ResourceTypesServiceListResourceTypesResponse, error) {
ctx, span := tracer.Start(ctx, "C1File.ListResourceTypes")
defer span.End()
@@ -93,7 +97,20 @@ func (c *C1File) PutResourceTypes(ctx context.Context, resourceTypesObjs ...*v2.
ctx, span := tracer.Start(ctx, "C1File.PutResourceTypes")
defer span.End()
- err := bulkPutConnectorObject(ctx, c, resourceTypes.Name(),
+ return c.putResourceTypesInternal(ctx, bulkPutConnectorObject, resourceTypesObjs...)
+}
+
+func (c *C1File) PutResourceTypesIfNewer(ctx context.Context, resourceTypesObjs ...*v2.ResourceType) error {
+ ctx, span := tracer.Start(ctx, "C1File.PutResourceTypesIfNewer")
+ defer span.End()
+
+ return c.putResourceTypesInternal(ctx, bulkPutConnectorObjectIfNewer, resourceTypesObjs...)
+}
+
+type resourceTypePutFunc func(context.Context, *C1File, string, func(m *v2.ResourceType) (goqu.Record, error), ...*v2.ResourceType) error
+
+func (c *C1File) putResourceTypesInternal(ctx context.Context, f resourceTypePutFunc, resourceTypesObjs ...*v2.ResourceType) error {
+ err := f(ctx, c, resourceTypes.Name(),
func(resource *v2.ResourceType) (goqu.Record, error) {
return nil, nil
},
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go
index 84d16f6b..b28b8bb5 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/resources.go
@@ -54,6 +54,10 @@ func (r *resourcesTable) Schema() (string, []interface{}) {
}
}
+func (r *resourcesTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ return nil
+}
+
func (c *C1File) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) {
ctx, span := tracer.Start(ctx, "C1File.ListResources")
defer span.End()
@@ -102,7 +106,20 @@ func (c *C1File) PutResources(ctx context.Context, resourceObjs ...*v2.Resource)
ctx, span := tracer.Start(ctx, "C1File.PutResources")
defer span.End()
- err := bulkPutConnectorObject(ctx, c, resources.Name(),
+ return c.putResourcesInternal(ctx, bulkPutConnectorObject, resourceObjs...)
+}
+
+func (c *C1File) PutResourcesIfNewer(ctx context.Context, resourceObjs ...*v2.Resource) error {
+ ctx, span := tracer.Start(ctx, "C1File.PutResourcesIfNewer")
+ defer span.End()
+
+ return c.putResourcesInternal(ctx, bulkPutConnectorObjectIfNewer, resourceObjs...)
+}
+
+type resourcePutFunc func(context.Context, *C1File, string, func(m *v2.Resource) (goqu.Record, error), ...*v2.Resource) error
+
+func (c *C1File) putResourcesInternal(ctx context.Context, f resourcePutFunc, resourceObjs ...*v2.Resource) error {
+ err := f(ctx, c, resources.Name(),
func(resource *v2.Resource) (goqu.Record, error) {
fields := goqu.Record{
"resource_type_id": resource.Id.ResourceType,
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go
index 931a3245..01e1e4bb 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sql_helpers.go
@@ -32,6 +32,7 @@ type tableDescriptor interface {
Name() string
Schema() (string, []interface{})
Version() string
+ Migrations(ctx context.Context, db *goqu.Database) error
}
type listRequest interface {
@@ -178,7 +179,7 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req
default:
var latestSyncRun *syncRun
var err error
- latestSyncRun, err = c.getFinishedSync(ctx, 0)
+ latestSyncRun, err = c.getFinishedSync(ctx, 0, SyncTypeFull)
if err != nil {
return nil, "", err
}
@@ -263,31 +264,22 @@ func (c *C1File) listConnectorObjects(ctx context.Context, tableName string, req
var protoMarshaler = proto.MarshalOptions{Deterministic: true}
-func bulkPutConnectorObject[T proto.Message](ctx context.Context, c *C1File,
- tableName string,
+// prepareConnectorObjectRows prepares the rows for bulk insertion.
+func prepareConnectorObjectRows[T proto.Message](
+ c *C1File,
+ msgs []T,
extractFields func(m T) (goqu.Record, error),
- msgs ...T) error {
- if len(msgs) == 0 {
- return nil
- }
- ctx, span := tracer.Start(ctx, "C1File.bulkPutConnectorObjectTx")
- defer span.End()
-
- err := c.validateSyncDb(ctx)
- if err != nil {
- return err
- }
-
+) ([]*goqu.Record, error) {
rows := make([]*goqu.Record, len(msgs))
for i, m := range msgs {
messageBlob, err := protoMarshaler.Marshal(m)
if err != nil {
- return err
+ return nil, err
}
fields, err := extractFields(m)
if err != nil {
- return err
+ return nil, err
}
if fields == nil {
fields = goqu.Record{}
@@ -296,7 +288,7 @@ func bulkPutConnectorObject[T proto.Message](ctx context.Context, c *C1File,
if _, idSet := fields["external_id"]; !idSet {
idGetter, ok := any(m).(protoHasID)
if !ok {
- return fmt.Errorf("unable to get ID for object")
+ return nil, fmt.Errorf("unable to get ID for object")
}
fields["external_id"] = idGetter.GetId()
}
@@ -305,6 +297,17 @@ func bulkPutConnectorObject[T proto.Message](ctx context.Context, c *C1File,
fields["discovered_at"] = time.Now().Format("2006-01-02 15:04:05.999999999")
rows[i] = &fields
}
+ return rows, nil
+}
+
+// executeChunkedInsert executes the insert query in chunks.
+func executeChunkedInsert(
+ ctx context.Context,
+ c *C1File,
+ tableName string,
+ rows []*goqu.Record,
+ buildQueryFn func(*goqu.InsertDataset, []*goqu.Record) (*goqu.InsertDataset, error),
+) error {
chunkSize := 100
chunks := len(rows) / chunkSize
if len(rows)%chunkSize != 0 {
@@ -318,14 +321,23 @@ func bulkPutConnectorObject[T proto.Message](ctx context.Context, c *C1File,
end = len(rows)
}
chunkedRows := rows[start:end]
- query, args, err := c.db.Insert(tableName).
- OnConflict(goqu.DoUpdate("external_id, sync_id", goqu.C("data").Set(goqu.I("EXCLUDED.data")))).
- Rows(chunkedRows).
- Prepared(true).
- ToSQL()
+
+ // Create the base insert dataset
+ insertDs := c.db.Insert(tableName)
+
+ // Apply the custom query building function
+ insertDs, err := buildQueryFn(insertDs, chunkedRows)
if err != nil {
return err
}
+
+ // Generate the SQL
+ query, args, err := insertDs.ToSQL()
+ if err != nil {
+ return err
+ }
+
+ // Execute the query
_, err = c.db.Exec(query, args...)
if err != nil {
return err
@@ -335,6 +347,82 @@ func bulkPutConnectorObject[T proto.Message](ctx context.Context, c *C1File,
return nil
}
+func bulkPutConnectorObject[T proto.Message](
+ ctx context.Context, c *C1File,
+ tableName string,
+ extractFields func(m T) (goqu.Record, error),
+ msgs ...T,
+) error {
+ if len(msgs) == 0 {
+ return nil
+ }
+ ctx, span := tracer.Start(ctx, "C1File.bulkPutConnectorObject")
+ defer span.End()
+
+ err := c.validateSyncDb(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Prepare rows
+ rows, err := prepareConnectorObjectRows(c, msgs, extractFields)
+ if err != nil {
+ return err
+ }
+
+ // Define query building function
+ buildQueryFn := func(insertDs *goqu.InsertDataset, chunkedRows []*goqu.Record) (*goqu.InsertDataset, error) {
+ return insertDs.
+ OnConflict(goqu.DoUpdate("external_id, sync_id", goqu.C("data").Set(goqu.I("EXCLUDED.data")))).
+ Rows(chunkedRows).
+ Prepared(true), nil
+ }
+
+ // Execute the insert
+ return executeChunkedInsert(ctx, c, tableName, rows, buildQueryFn)
+}
+
+func bulkPutConnectorObjectIfNewer[T proto.Message](
+ ctx context.Context, c *C1File,
+ tableName string,
+ extractFields func(m T) (goqu.Record, error),
+ msgs ...T,
+) error {
+ if len(msgs) == 0 {
+ return nil
+ }
+ ctx, span := tracer.Start(ctx, "C1File.bulkPutConnectorObjectIfNewer")
+ defer span.End()
+
+ err := c.validateSyncDb(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Prepare rows
+ rows, err := prepareConnectorObjectRows(c, msgs, extractFields)
+ if err != nil {
+ return err
+ }
+
+ // Define query building function
+ buildQueryFn := func(insertDs *goqu.InsertDataset, chunkedRows []*goqu.Record) (*goqu.InsertDataset, error) {
+ return insertDs.
+ OnConflict(goqu.DoUpdate("external_id, sync_id",
+ goqu.Record{
+ "data": goqu.I("EXCLUDED.data"),
+ "discovered_at": goqu.I("EXCLUDED.discovered_at"),
+ }).Where(
+ goqu.L("EXCLUDED.discovered_at > ?.discovered_at", goqu.I(tableName)),
+ )).
+ Rows(chunkedRows).
+ Prepared(true), nil
+ }
+
+ // Execute the insert
+ return executeChunkedInsert(ctx, c, tableName, rows, buildQueryFn)
+}
+
func (c *C1File) getResourceObject(ctx context.Context, resourceID *v2.ResourceId, m *v2.Resource, syncID string) error {
ctx, span := tracer.Start(ctx, "C1File.getResourceObject")
defer span.End()
@@ -359,7 +447,7 @@ func (c *C1File) getResourceObject(ctx context.Context, resourceID *v2.ResourceI
default:
var latestSyncRun *syncRun
var err error
- latestSyncRun, err = c.getFinishedSync(ctx, 0)
+ latestSyncRun, err = c.getFinishedSync(ctx, 0, SyncTypeFull)
if err != nil {
return err
}
@@ -419,15 +507,15 @@ func (c *C1File) getConnectorObject(ctx context.Context, tableName string, id st
default:
var latestSyncRun *syncRun
var err error
- latestSyncRun, err = c.getFinishedSync(ctx, 0)
+ latestSyncRun, err = c.getFinishedSync(ctx, 0, SyncTypeAny)
if err != nil {
- return err
+ return fmt.Errorf("error getting finished sync: %w", err)
}
if latestSyncRun == nil {
latestSyncRun, err = c.getLatestUnfinishedSync(ctx)
if err != nil {
- return err
+ return fmt.Errorf("error getting latest unfinished sync: %w", err)
}
}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go
index 69f1170a..96fbfc9a 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/dotc1z/sync_runs.go
@@ -26,7 +26,9 @@ create table if not exists %s (
sync_id text not null,
started_at datetime not null,
ended_at datetime,
- sync_token text not null
+ sync_token text not null,
+ sync_type text not null default 'full',
+ parent_sync_id text not null default ''
);
create unique index if not exists %s on %s (sync_id);`
@@ -50,11 +52,51 @@ func (r *syncRunsTable) Schema() (string, []interface{}) {
}
}
+func (r *syncRunsTable) Migrations(ctx context.Context, db *goqu.Database) error {
+ // Check if sync_type column exists
+ var syncTypeExists int
+ err := db.QueryRowContext(ctx, fmt.Sprintf("select count(*) from pragma_table_info('%s') where name='sync_type'", r.Name())).Scan(&syncTypeExists)
+ if err != nil {
+ return err
+ }
+ if syncTypeExists == 0 {
+ _, err = db.ExecContext(ctx, fmt.Sprintf("alter table %s add column sync_type text not null default 'full'", r.Name()))
+ if err != nil {
+ return err
+ }
+ }
+
+ // Check if parent_sync_id column exists
+ var parentSyncIDExists int
+ err = db.QueryRowContext(ctx, fmt.Sprintf("select count(*) from pragma_table_info('%s') where name='parent_sync_id'", r.Name())).Scan(&parentSyncIDExists)
+ if err != nil {
+ return err
+ }
+ if parentSyncIDExists == 0 {
+ _, err = db.ExecContext(ctx, fmt.Sprintf("alter table %s add column parent_sync_id text not null default ''", r.Name()))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type SyncType string
+
+const (
+ SyncTypeFull SyncType = "full"
+ SyncTypePartial SyncType = "partial"
+ SyncTypeAny SyncType = ""
+)
+
type syncRun struct {
- ID string
- StartedAt *time.Time
- EndedAt *time.Time
- SyncToken string
+ ID string
+ StartedAt *time.Time
+ EndedAt *time.Time
+ SyncToken string
+ Type SyncType
+ ParentSyncID string
}
func (c *C1File) getLatestUnfinishedSync(ctx context.Context) (*syncRun, error) {
@@ -70,7 +112,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context) (*syncRun, error)
oneWeekAgo := time.Now().AddDate(0, 0, -7)
ret := &syncRun{}
q := c.db.From(syncRuns.Name())
- q = q.Select("sync_id", "started_at", "ended_at", "sync_token")
+ q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id")
q = q.Where(goqu.C("ended_at").IsNull())
q = q.Where(goqu.C("started_at").Gte(oneWeekAgo))
q = q.Order(goqu.C("started_at").Desc())
@@ -83,7 +125,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context) (*syncRun, error)
row := c.db.QueryRowContext(ctx, query, args...)
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken)
+ err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -94,7 +136,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context) (*syncRun, error)
return ret, nil
}
-func (c *C1File) getFinishedSync(ctx context.Context, offset uint) (*syncRun, error) {
+func (c *C1File) getFinishedSync(ctx context.Context, offset uint, syncType SyncType) (*syncRun, error) {
ctx, span := tracer.Start(ctx, "C1File.getFinishedSync")
defer span.End()
@@ -103,10 +145,18 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint) (*syncRun, er
return nil, err
}
+ // Validate syncType
+ if syncType != SyncTypeFull && syncType != SyncTypePartial && syncType != SyncTypeAny {
+ return nil, fmt.Errorf("invalid sync type: %s", syncType)
+ }
+
ret := &syncRun{}
q := c.db.From(syncRuns.Name())
- q = q.Select("sync_id", "started_at", "ended_at", "sync_token")
+ q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id")
q = q.Where(goqu.C("ended_at").IsNotNull())
+ if syncType != SyncTypeAny {
+ q = q.Where(goqu.C("sync_type").Eq(syncType))
+ }
q = q.Order(goqu.C("ended_at").Desc())
q = q.Limit(1)
@@ -121,7 +171,7 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint) (*syncRun, er
row := c.db.QueryRowContext(ctx, query, args...)
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken)
+ err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -142,7 +192,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui
}
q := c.db.From(syncRuns.Name()).Prepared(true)
- q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token")
+ q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id")
if pageToken != "" {
q = q.Where(goqu.C("id").Gte(pageToken))
@@ -177,7 +227,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui
}
rowId := 0
data := &syncRun{}
- err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken)
+ err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken, &data.Type, &data.ParentSyncID)
if err != nil {
return nil, "", err
}
@@ -197,7 +247,7 @@ func (c *C1File) LatestSyncID(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "C1File.LatestSyncID")
defer span.End()
- s, err := c.getFinishedSync(ctx, 0)
+ s, err := c.getFinishedSync(ctx, 0, SyncTypeFull)
if err != nil {
return "", err
}
@@ -223,7 +273,7 @@ func (c *C1File) PreviousSyncID(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "C1File.PreviousSyncID")
defer span.End()
- s, err := c.getFinishedSync(ctx, 1)
+ s, err := c.getFinishedSync(ctx, 1, SyncTypeFull)
if err != nil {
return "", err
}
@@ -239,7 +289,7 @@ func (c *C1File) LatestFinishedSync(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "C1File.LatestFinishedSync")
defer span.End()
- s, err := c.getFinishedSync(ctx, 0)
+ s, err := c.getFinishedSync(ctx, 0, SyncTypeFull)
if err != nil {
return "", err
}
@@ -263,16 +313,15 @@ func (c *C1File) getSync(ctx context.Context, syncID string) (*syncRun, error) {
ret := &syncRun{}
q := c.db.From(syncRuns.Name())
- q = q.Select("sync_id", "started_at", "ended_at", "sync_token")
+ q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id")
q = q.Where(goqu.C("sync_id").Eq(syncID))
query, args, err := q.ToSQL()
if err != nil {
return nil, err
}
-
row := c.db.QueryRowContext(ctx, query, args...)
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken)
+ err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID)
if err != nil {
return nil, err
}
@@ -291,6 +340,19 @@ func (c *C1File) getCurrentSync(ctx context.Context) (*syncRun, error) {
return c.getSync(ctx, c.currentSyncID)
}
+func (c *C1File) SetCurrentSync(ctx context.Context, syncID string) error {
+ ctx, span := tracer.Start(ctx, "C1File.SetCurrentSync")
+ defer span.End()
+
+ _, err := c.getSync(ctx, syncID)
+ if err != nil {
+ return err
+ }
+
+ c.currentSyncID = syncID
+ return nil
+}
+
func (c *C1File) CheckpointSync(ctx context.Context, syncToken string) error {
ctx, span := tracer.Start(ctx, "C1File.CheckpointSync")
defer span.End()
@@ -355,6 +417,26 @@ func (c *C1File) StartNewSync(ctx context.Context) (string, error) {
ctx, span := tracer.Start(ctx, "C1File.StartNewSync")
defer span.End()
+ return c.startNewSyncInternal(ctx, SyncTypeFull, "")
+}
+
+func (c *C1File) StartNewSyncV2(ctx context.Context, syncType string, parentSyncID string) (string, error) {
+ ctx, span := tracer.Start(ctx, "C1File.StartNewSyncV2")
+ defer span.End()
+
+ var syncTypeEnum SyncType
+ switch syncType {
+ case "full":
+ syncTypeEnum = SyncTypeFull
+ case "partial":
+ syncTypeEnum = SyncTypePartial
+ default:
+ return "", fmt.Errorf("invalid sync type: %s", syncType)
+ }
+ return c.startNewSyncInternal(ctx, syncTypeEnum, parentSyncID)
+}
+
+func (c *C1File) startNewSyncInternal(ctx context.Context, syncType SyncType, parentSyncID string) (string, error) {
// Not sure if we want to do this here
if c.currentSyncID != "" {
return c.currentSyncID, nil
@@ -362,27 +444,36 @@ func (c *C1File) StartNewSync(ctx context.Context) (string, error) {
syncID := ksuid.New().String()
+ if err := c.insertSyncRun(ctx, syncID, syncType, parentSyncID); err != nil {
+ return "", err
+ }
+
+ c.currentSyncID = syncID
+
+ return c.currentSyncID, nil
+}
+
+func (c *C1File) insertSyncRun(ctx context.Context, syncID string, syncType SyncType, parentSyncID string) error {
q := c.db.Insert(syncRuns.Name())
q = q.Rows(goqu.Record{
- "sync_id": syncID,
- "started_at": time.Now().Format("2006-01-02 15:04:05.999999999"),
- "sync_token": "",
+ "sync_id": syncID,
+ "started_at": time.Now().Format("2006-01-02 15:04:05.999999999"),
+ "sync_token": "",
+ "sync_type": syncType,
+ "parent_sync_id": parentSyncID,
})
query, args, err := q.ToSQL()
if err != nil {
- return "", err
+ return err
}
_, err = c.db.ExecContext(ctx, query, args...)
if err != nil {
- return "", err
+ return err
}
-
c.dbUpdated = true
- c.currentSyncID = syncID
-
- return c.currentSyncID, nil
+ return nil
}
func (c *C1File) CurrentSyncStep(ctx context.Context) (string, error) {
@@ -407,11 +498,21 @@ func (c *C1File) EndSync(ctx context.Context) error {
return err
}
+ if err := c.endSyncRun(ctx, c.currentSyncID); err != nil {
+ return err
+ }
+
+ c.currentSyncID = ""
+
+ return nil
+}
+
+func (c *C1File) endSyncRun(ctx context.Context, syncID string) error {
q := c.db.Update(syncRuns.Name())
q = q.Set(goqu.Record{
"ended_at": time.Now().Format("2006-01-02 15:04:05.999999999"),
})
- q = q.Where(goqu.C("sync_id").Eq(c.currentSyncID))
+ q = q.Where(goqu.C("sync_id").Eq(syncID))
q = q.Where(goqu.C("ended_at").IsNull())
query, args, err := q.ToSQL()
@@ -423,8 +524,6 @@ func (c *C1File) EndSync(ctx context.Context) error {
if err != nil {
return err
}
-
- c.currentSyncID = ""
c.dbUpdated = true
return nil
@@ -452,6 +551,7 @@ func (c *C1File) Cleanup(ctx context.Context) error {
}
var ret []*syncRun
+ var partials []*syncRun
pageToken := ""
for {
@@ -464,7 +564,11 @@ func (c *C1File) Cleanup(ctx context.Context) error {
if sr.EndedAt == nil {
continue
}
- ret = append(ret, sr)
+ if sr.Type == SyncTypePartial {
+ partials = append(partials, sr)
+ } else {
+ ret = append(ret, sr)
+ }
}
if nextPageToken == "" {
@@ -492,6 +596,25 @@ func (c *C1File) Cleanup(ctx context.Context) error {
l.Info("Removed old sync data.", zap.String("sync_date", ret[i].EndedAt.Format(time.RFC3339)), zap.String("sync_id", ret[i].ID))
}
+ // Delete partial syncs that ended before the earliest-kept sync started
+ if len(ret) > syncLimit {
+ earliestKeptSync := ret[len(ret)-syncLimit]
+ l.Debug("Earliest kept sync", zap.String("sync_id", earliestKeptSync.ID), zap.Time("started_at", *earliestKeptSync.StartedAt))
+
+ for _, partial := range partials {
+ if partial.EndedAt != nil && partial.EndedAt.Before(*earliestKeptSync.StartedAt) {
+ err = c.DeleteSyncRun(ctx, partial.ID)
+ if err != nil {
+ return err
+ }
+ l.Info("Removed partial sync that ended before earliest kept sync.",
+ zap.String("partial_sync_end", partial.EndedAt.Format(time.RFC3339)),
+ zap.String("earliest_kept_sync_start", earliestKeptSync.StartedAt.Format(time.RFC3339)),
+ zap.String("sync_id", partial.ID))
+ }
+ }
+ }
+
err = c.Vacuum(ctx)
if err != nil {
return err
@@ -574,10 +697,12 @@ func (c *C1File) GetSync(ctx context.Context, request *reader_v2.SyncsReaderServ
return &reader_v2.SyncsReaderServiceGetSyncResponse{
Sync: &reader_v2.SyncRun{
- Id: sr.ID,
- StartedAt: toTimeStamp(sr.StartedAt),
- EndedAt: toTimeStamp(sr.EndedAt),
- SyncToken: sr.SyncToken,
+ Id: sr.ID,
+ StartedAt: toTimeStamp(sr.StartedAt),
+ EndedAt: toTimeStamp(sr.EndedAt),
+ SyncToken: sr.SyncToken,
+ SyncType: string(sr.Type),
+ ParentSyncId: sr.ParentSyncID,
},
}, nil
}
@@ -594,10 +719,12 @@ func (c *C1File) ListSyncs(ctx context.Context, request *reader_v2.SyncsReaderSe
syncRuns := make([]*reader_v2.SyncRun, len(syncs))
for i, sr := range syncs {
syncRuns[i] = &reader_v2.SyncRun{
- Id: sr.ID,
- StartedAt: toTimeStamp(sr.StartedAt),
- EndedAt: toTimeStamp(sr.EndedAt),
- SyncToken: sr.SyncToken,
+ Id: sr.ID,
+ StartedAt: toTimeStamp(sr.StartedAt),
+ EndedAt: toTimeStamp(sr.EndedAt),
+ SyncToken: sr.SyncToken,
+ SyncType: string(sr.Type),
+ ParentSyncId: sr.ParentSyncID,
}
}
@@ -611,17 +738,30 @@ func (c *C1File) GetLatestFinishedSync(ctx context.Context, request *reader_v2.S
ctx, span := tracer.Start(ctx, "C1File.GetLatestFinishedSync")
defer span.End()
- sync, err := c.getFinishedSync(ctx, 0)
+ syncType := request.SyncType
+ if syncType == "" {
+ syncType = string(SyncTypeFull)
+ }
+
+ sync, err := c.getFinishedSync(ctx, 0, SyncType(syncType))
if err != nil {
return nil, fmt.Errorf("error fetching latest finished sync: %w", err)
}
+ if sync == nil {
+ return &reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse{
+ Sync: nil,
+ }, nil
+ }
+
return &reader_v2.SyncsReaderServiceGetLatestFinishedSyncResponse{
Sync: &reader_v2.SyncRun{
- Id: sync.ID,
- StartedAt: toTimeStamp(sync.StartedAt),
- EndedAt: toTimeStamp(sync.EndedAt),
- SyncToken: sync.SyncToken,
+ Id: sync.ID,
+ StartedAt: toTimeStamp(sync.StartedAt),
+ EndedAt: toTimeStamp(sync.EndedAt),
+ SyncToken: sync.SyncToken,
+ SyncType: string(sync.Type),
+ ParentSyncId: sync.ParentSyncID,
},
}, nil
}
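The Cleanup hunk above only removes a partial sync once it can no longer matter relative to the kept full syncs. Below is a minimal, self-contained sketch of that retention rule, assuming (as the Cleanup loop implies) that the non-partial syncs are ordered oldest-first; the syncRun struct here is reduced to just the fields the rule needs.

```go
package main

import (
	"fmt"
	"time"
)

// syncRun mirrors only the fields the retention rule needs; the real
// struct in dotc1z carries more.
type syncRun struct {
	ID        string
	StartedAt *time.Time
	EndedAt   *time.Time
	Type      string
}

// partialSyncsToDelete applies the rule from Cleanup: once the regular syncs
// have been trimmed to syncLimit, any *ended* partial sync that finished
// before the earliest kept sync started is also removable.
func partialSyncsToDelete(full, partials []*syncRun, syncLimit int) []*syncRun {
	if len(full) <= syncLimit {
		return nil
	}
	earliestKept := full[len(full)-syncLimit]
	var doomed []*syncRun
	for _, p := range partials {
		if p.EndedAt != nil && p.EndedAt.Before(*earliestKept.StartedAt) {
			doomed = append(doomed, p)
		}
	}
	return doomed
}

func main() {
	t := func(h int) *time.Time { v := time.Date(2024, 1, 1, h, 0, 0, 0, time.UTC); return &v }
	full := []*syncRun{
		{ID: "old", StartedAt: t(1), EndedAt: t(2), Type: "full"},
		{ID: "kept", StartedAt: t(10), EndedAt: t(11), Type: "full"},
	}
	partials := []*syncRun{
		{ID: "p1", StartedAt: t(3), EndedAt: t(4), Type: "partial"},   // ended before "kept" started -> removable
		{ID: "p2", StartedAt: t(12), EndedAt: t(13), Type: "partial"}, // ended after -> kept
	}
	for _, p := range partialSyncsToDelete(full, partials, 1) {
		fmt.Println("would delete partial sync", p.ID)
	}
}
```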
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go
index 07a00ca9..ef8f093e 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/default_relationships.go
@@ -6,6 +6,8 @@ var DefaultRelationships = []SchemaFieldRelationship{
FieldsRequiredTogether(createTicketField, ticketTemplatePathField),
FieldsRequiredTogether(bulkCreateTicketField, bulkTicketTemplatePathField),
FieldsRequiredTogether(getTicketField, ticketIDField),
+ FieldsRequiredTogether(diffSyncsField, diffSyncsBaseSyncField, diffSyncsAppliedSyncField),
+ FieldsRequiredTogether(compactSyncsField, compactSyncIDsField, compactFilePathsField, compactOutputDirectoryField),
FieldsMutuallyExclusive(
grantEntitlementField,
revokeGrantField,
@@ -25,6 +27,8 @@ var DefaultRelationships = []SchemaFieldRelationship{
deleteResourceTypeField,
rotateCredentialsTypeField,
eventFeedField,
+ diffSyncsField,
+ compactSyncsField,
ListTicketSchemasField,
),
FieldsDependentOn(
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go
index 169593ef..54b7717f 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/defaults.go
@@ -1,6 +1,10 @@
package field
-import "github.com/conductorone/baton-sdk/pkg/logging"
+import (
+ "time"
+
+ "github.com/conductorone/baton-sdk/pkg/logging"
+)
const (
OtelCollectorEndpointFieldName = "otel-collector-endpoint"
@@ -34,7 +38,14 @@ var (
deleteResourceField = StringField("delete-resource", WithHidden(true), WithDescription("The id of the resource to delete"), WithPersistent(true), WithExportTarget(ExportTargetNone))
deleteResourceTypeField = StringField("delete-resource-type", WithHidden(true), WithDescription("The type of the resource to delete"), WithPersistent(true), WithExportTarget(ExportTargetNone))
eventFeedField = StringField("event-feed", WithHidden(true), WithDescription("Read feed events to stdout"), WithPersistent(true), WithExportTarget(ExportTargetNone))
- fileField = StringField("file", WithShortHand("f"), WithDefaultValue("sync.c1z"), WithDescription("The path to the c1z file to sync with"),
+ eventFeedIdField = StringField("event-feed-id", WithHidden(true), WithDescription("The id of the event feed to read events from"), WithPersistent(true), WithExportTarget(ExportTargetNone))
+ eventFeedStartAtField = StringField("event-feed-start-at",
+ WithDefaultValue(time.Now().AddDate(0, 0, -1).Format(time.RFC3339)),
+ WithHidden(true),
+ WithDescription("The start time of the event feed to read events from"),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone))
+ fileField = StringField("file", WithShortHand("f"), WithDefaultValue("sync.c1z"), WithDescription("The path to the c1z file to sync with"),
WithPersistent(true), WithExportTarget(ExportTargetNone))
grantEntitlementField = StringField("grant-entitlement", WithHidden(true), WithDescription("The id of the entitlement to grant to the supplied principal"),
WithPersistent(true), WithExportTarget(ExportTargetNone))
@@ -54,7 +65,52 @@ var (
WithPersistent(true), WithExportTarget(ExportTargetNone))
logLevelField = StringField("log-level", WithDefaultValue("info"), WithDescription("The log level: debug, info, warn, error"), WithPersistent(true),
WithExportTarget(ExportTargetOps))
- skipFullSync = BoolField("skip-full-sync", WithDescription("This must be set to skip a full sync"), WithPersistent(true), WithExportTarget(ExportTargetNone))
+ skipFullSync = BoolField("skip-full-sync", WithDescription("This must be set to skip a full sync"), WithPersistent(true), WithExportTarget(ExportTargetNone))
+ targetedSyncResourceIDs = StringSliceField("sync-resources", WithDescription("The resource IDs to sync"), WithPersistent(true), WithExportTarget(ExportTargetNone))
+ diffSyncsField = BoolField(
+ "diff-syncs",
+ WithDescription("Create a new partial SyncID from a base and applied sync."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+ diffSyncsBaseSyncField = StringField("base-sync-id",
+ WithDescription("The base sync to diff from."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+ diffSyncsAppliedSyncField = StringField("applied-sync-id",
+ WithDescription("The sync to show diffs when applied to the base sync."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+
+ compactSyncsField = BoolField("compact-syncs",
+ WithDescription("Provide a list of sync files to compact into a single c1z file and sync ID."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+ compactOutputDirectoryField = StringField("compact-output-path",
+ WithDescription("The directory to store the results in"),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+ compactFilePathsField = StringSliceField("compact-file-paths",
+ WithDescription("A comma-separated list of file paths to sync from."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
+ compactSyncIDsField = StringSliceField("compact-sync-ids",
+ WithDescription("A comma-separated list of file ids to sync from. Must match sync IDs from each file provided. Order matters."),
+ WithHidden(true),
+ WithPersistent(true),
+ WithExportTarget(ExportTargetNone),
+ )
otelCollectorEndpoint = StringField(OtelCollectorEndpointFieldName,
WithDescription("The endpoint of the OpenTelemetry collector to send observability data to (used for both tracing and logging if specific endpoints are not provided)"),
@@ -148,6 +204,8 @@ var DefaultFields = []SchemaField{
deleteResourceField,
deleteResourceTypeField,
eventFeedField,
+ eventFeedIdField,
+ eventFeedStartAtField,
fileField,
grantEntitlementField,
grantPrincipalField,
@@ -160,8 +218,16 @@ var DefaultFields = []SchemaField{
ticketTemplatePathField,
logLevelField,
skipFullSync,
+ targetedSyncResourceIDs,
externalResourceC1ZField,
externalResourceEntitlementIdFilter,
+ diffSyncsField,
+ diffSyncsBaseSyncField,
+ diffSyncsAppliedSyncField,
+ compactSyncIDsField,
+ compactFilePathsField,
+ compactOutputDirectoryField,
+ compactSyncsField,
otelCollectorEndpoint,
otelCollectorEndpointTLSCertPath,
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go
index 01ba1652..cb235a57 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/field/fields.go
@@ -72,6 +72,8 @@ type SchemaField struct {
// Config actually ingested on the connector side - auth, regions, etc.
ConnectorConfig connectorConfig
+
+ WasReExported bool
}
type SchemaTypes interface {
@@ -109,6 +111,13 @@ func (s SchemaField) GetDescription() string {
return line
}
+func (s SchemaField) ExportAs(et ExportTarget) SchemaField {
+ c := s
+ c.ExportTarget = et
+ c.WasReExported = true
+ return c
+}
+
// Go doesn't allow generic methods on a non-generic struct.
func ValidateField[T SchemaTypes](s *SchemaField, value T) (bool, error) {
return s.validate(value)
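For context on the new ExportAs helper: it copies the field, swaps its export target, and marks the copy as re-exported, so a connector can surface an otherwise-internal field without mutating the shared default. A small usage sketch; the field name is illustrative, not one the SDK defines.

```go
package main

import "github.com/conductorone/baton-sdk/pkg/field"

func main() {
	// "api-token" is an illustrative field name, not one the SDK defines.
	base := field.StringField("api-token",
		field.WithDescription("The API token to authenticate with"),
		field.WithExportTarget(field.ExportTargetNone),
	)

	// ExportAs returns a copy with the new export target and WasReExported set;
	// the original field definition is left unchanged.
	opsVisible := base.ExportAs(field.ExportTargetOps)
	_ = opsVisible
}
```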
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go b/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go
new file mode 100644
index 00000000..ed9fea89
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/retry/retry.go
@@ -0,0 +1,113 @@
+package retry
+
+import (
+ "context"
+ "math"
+ "time"
+
+ v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+ "go.opentelemetry.io/otel"
+ "go.uber.org/zap"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+var tracer = otel.Tracer("baton-sdk/retry")
+
+type Retryer struct {
+ attempts uint
+ maxAttempts uint
+ initialDelay time.Duration
+ maxDelay time.Duration
+}
+
+type RetryConfig struct {
+ MaxAttempts uint // 0 means no limit (which is also the default).
+ InitialDelay time.Duration // Default is 1 second.
+ MaxDelay time.Duration // Default is 60 seconds. 0 means no limit.
+}
+
+func NewRetryer(ctx context.Context, config RetryConfig) *Retryer {
+ r := &Retryer{
+ attempts: 0,
+ maxAttempts: config.MaxAttempts,
+ initialDelay: config.InitialDelay,
+ maxDelay: config.MaxDelay,
+ }
+ if r.initialDelay == 0 {
+ r.initialDelay = time.Second
+ }
+ if r.maxDelay == 0 {
+ r.maxDelay = 60 * time.Second
+ }
+ return r
+}
+
+func (r *Retryer) ShouldWaitAndRetry(ctx context.Context, err error) bool {
+ ctx, span := tracer.Start(ctx, "retry.ShouldWaitAndRetry")
+ defer span.End()
+
+ if err == nil {
+ r.attempts = 0
+ return true
+ }
+ if status.Code(err) != codes.Unavailable && status.Code(err) != codes.DeadlineExceeded {
+ return false
+ }
+
+ r.attempts++
+ l := ctxzap.Extract(ctx)
+
+ if r.maxAttempts > 0 && r.attempts > r.maxAttempts {
+ l.Warn("max attempts reached", zap.Error(err), zap.Uint("max_attempts", r.maxAttempts))
+ return false
+ }
+
+ // use linear backoff by default
+ var wait time.Duration
+ if r.attempts > math.MaxInt64 {
+ wait = r.maxDelay
+ } else {
+ wait = time.Duration(int64(r.attempts)) * r.initialDelay
+ }
+
+ // If error contains rate limit data, use that instead
+ if st, ok := status.FromError(err); ok {
+ details := st.Details()
+ for _, detail := range details {
+ if rlData, ok := detail.(*v2.RateLimitDescription); ok {
+ waitResetAt := time.Until(rlData.ResetAt.AsTime())
+ if waitResetAt <= 0 {
+ continue
+ }
+ duration := time.Duration(rlData.Limit)
+ if duration <= 0 {
+ continue
+ }
+ waitResetAt /= duration
+ // Round up to the nearest second to make sure we don't hit the rate limit again
+ waitResetAt = time.Duration(math.Ceil(waitResetAt.Seconds())) * time.Second
+ if waitResetAt > 0 {
+ wait = waitResetAt
+ break
+ }
+ }
+ }
+ }
+
+ if wait > r.maxDelay {
+ wait = r.maxDelay
+ }
+
+ l.Warn("retrying operation", zap.Error(err), zap.Duration("wait", wait))
+
+ for {
+ select {
+ case <-time.After(wait):
+ return true
+ case <-ctx.Done():
+ return false
+ }
+ }
+}
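The new retry package extracts the backoff logic that previously lived in the syncer (see the syncer.go hunk later in this diff). A hedged usage sketch modeled on how the syncer calls it; doWork is a placeholder for any call that can fail with transient gRPC errors.

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/conductorone/baton-sdk/pkg/retry"
)

// doWork stands in for any connector call that can fail with transient
// gRPC errors (codes.Unavailable, codes.DeadlineExceeded).
func doWork(ctx context.Context) error {
	return errors.New("permanent failure") // placeholder
}

func main() {
	ctx := context.Background()
	r := retry.NewRetryer(ctx, retry.RetryConfig{
		MaxAttempts:  5,                // give up after 5 transient failures
		InitialDelay: time.Second,      // linear backoff: 1s, 2s, 3s, ...
		MaxDelay:     30 * time.Second, // cap on any single wait
	})

	for {
		err := doWork(ctx)
		// ShouldWaitAndRetry resets its attempt counter on success, sleeps and
		// returns true for retryable codes (honoring any RateLimitDescription
		// attached to the status), and returns false otherwise or when the
		// context is cancelled while waiting.
		if r.ShouldWaitAndRetry(ctx, err) {
			if err == nil {
				break // success
			}
			continue // transient error; a backoff already happened
		}
		log.Println("giving up:", err)
		break
	}
}
```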
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go
index 19b7dd35..2667bbab 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/empty_connector.go
@@ -4,71 +4,162 @@ import (
"context"
v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
type emptyConnector struct{}
// GetAsset gets an asset.
-func (n *emptyConnector) GetAsset(request *v2.AssetServiceGetAssetRequest, server v2.AssetService_GetAssetServer) error {
- err := server.Send(&v2.AssetServiceGetAssetResponse{
- Msg: &v2.AssetServiceGetAssetResponse_Metadata_{
- Metadata: &v2.AssetServiceGetAssetResponse_Metadata{ContentType: "application/example"},
- },
- })
- if err != nil {
- return err
- }
-
- err = server.Send(&v2.AssetServiceGetAssetResponse{
- Msg: &v2.AssetServiceGetAssetResponse_Data_{
- Data: &v2.AssetServiceGetAssetResponse_Data{Data: nil},
- },
- })
- if err != nil {
- return err
- }
-
- return nil
+func (n *emptyConnector) GetAsset(_ context.Context, request *v2.AssetServiceGetAssetRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[v2.AssetServiceGetAssetResponse], error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
}
// ListResourceTypes returns a list of resource types.
-func (n *emptyConnector) ListResourceTypes(ctx context.Context, request *v2.ResourceTypesServiceListResourceTypesRequest) (*v2.ResourceTypesServiceListResourceTypesResponse, error) {
+func (n *emptyConnector) ListResourceTypes(
+ ctx context.Context,
+ request *v2.ResourceTypesServiceListResourceTypesRequest,
+ opts ...grpc.CallOption,
+) (*v2.ResourceTypesServiceListResourceTypesResponse, error) {
return &v2.ResourceTypesServiceListResourceTypesResponse{
List: []*v2.ResourceType{},
}, nil
}
// ListResources returns a list of resources.
-func (n *emptyConnector) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest) (*v2.ResourcesServiceListResourcesResponse, error) {
+func (n *emptyConnector) ListResources(ctx context.Context, request *v2.ResourcesServiceListResourcesRequest, opts ...grpc.CallOption) (*v2.ResourcesServiceListResourcesResponse, error) {
return &v2.ResourcesServiceListResourcesResponse{
List: []*v2.Resource{},
}, nil
}
+func (n *emptyConnector) GetResource(
+ ctx context.Context,
+ request *v2.ResourceGetterServiceGetResourceRequest,
+ opts ...grpc.CallOption,
+) (*v2.ResourceGetterServiceGetResourceResponse, error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
+}
+
// ListEntitlements returns a list of entitlements.
-func (n *emptyConnector) ListEntitlements(ctx context.Context, request *v2.EntitlementsServiceListEntitlementsRequest) (*v2.EntitlementsServiceListEntitlementsResponse, error) {
+func (n *emptyConnector) ListEntitlements(
+ ctx context.Context,
+ request *v2.EntitlementsServiceListEntitlementsRequest,
+ opts ...grpc.CallOption,
+) (*v2.EntitlementsServiceListEntitlementsResponse, error) {
return &v2.EntitlementsServiceListEntitlementsResponse{
List: []*v2.Entitlement{},
}, nil
}
// ListGrants returns a list of grants.
-func (n *emptyConnector) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) {
+func (n *emptyConnector) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest, opts ...grpc.CallOption) (*v2.GrantsServiceListGrantsResponse, error) {
return &v2.GrantsServiceListGrantsResponse{
List: []*v2.Grant{},
}, nil
}
+func (n *emptyConnector) Grant(ctx context.Context, request *v2.GrantManagerServiceGrantRequest, opts ...grpc.CallOption) (*v2.GrantManagerServiceGrantResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) Revoke(ctx context.Context, request *v2.GrantManagerServiceRevokeRequest, opts ...grpc.CallOption) (*v2.GrantManagerServiceRevokeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
// GetMetadata returns a connector metadata.
-func (n *emptyConnector) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest) (*v2.ConnectorServiceGetMetadataResponse, error) {
+func (n *emptyConnector) GetMetadata(ctx context.Context, request *v2.ConnectorServiceGetMetadataRequest, opts ...grpc.CallOption) (*v2.ConnectorServiceGetMetadataResponse, error) {
return &v2.ConnectorServiceGetMetadataResponse{Metadata: &v2.ConnectorMetadata{}}, nil
}
// Validate is called by the connector framework to validate the correct response.
-func (n *emptyConnector) Validate(ctx context.Context, request *v2.ConnectorServiceValidateRequest) (*v2.ConnectorServiceValidateResponse, error) {
+func (n *emptyConnector) Validate(ctx context.Context, request *v2.ConnectorServiceValidateRequest, opts ...grpc.CallOption) (*v2.ConnectorServiceValidateResponse, error) {
return &v2.ConnectorServiceValidateResponse{}, nil
}
+func (n *emptyConnector) BulkCreateTickets(ctx context.Context, request *v2.TicketsServiceBulkCreateTicketsRequest, opts ...grpc.CallOption) (*v2.TicketsServiceBulkCreateTicketsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) BulkGetTickets(ctx context.Context, request *v2.TicketsServiceBulkGetTicketsRequest, opts ...grpc.CallOption) (*v2.TicketsServiceBulkGetTicketsResponse, error) {
+ return &v2.TicketsServiceBulkGetTicketsResponse{
+ Tickets: []*v2.TicketsServiceGetTicketResponse{},
+ }, nil
+}
+
+func (n *emptyConnector) CreateTicket(ctx context.Context, request *v2.TicketsServiceCreateTicketRequest, opts ...grpc.CallOption) (*v2.TicketsServiceCreateTicketResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) GetTicket(ctx context.Context, request *v2.TicketsServiceGetTicketRequest, opts ...grpc.CallOption) (*v2.TicketsServiceGetTicketResponse, error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
+}
+
+func (n *emptyConnector) ListTicketSchemas(ctx context.Context, request *v2.TicketsServiceListTicketSchemasRequest, opts ...grpc.CallOption) (*v2.TicketsServiceListTicketSchemasResponse, error) {
+ return &v2.TicketsServiceListTicketSchemasResponse{
+ List: []*v2.TicketSchema{},
+ }, nil
+}
+
+func (n *emptyConnector) GetTicketSchema(ctx context.Context, request *v2.TicketsServiceGetTicketSchemaRequest, opts ...grpc.CallOption) (*v2.TicketsServiceGetTicketSchemaResponse, error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
+}
+
+func (n *emptyConnector) Cleanup(ctx context.Context, request *v2.ConnectorServiceCleanupRequest, opts ...grpc.CallOption) (*v2.ConnectorServiceCleanupResponse, error) {
+ return &v2.ConnectorServiceCleanupResponse{}, nil
+}
+
+func (n *emptyConnector) CreateAccount(ctx context.Context, request *v2.CreateAccountRequest, opts ...grpc.CallOption) (*v2.CreateAccountResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) RotateCredential(ctx context.Context, request *v2.RotateCredentialRequest, opts ...grpc.CallOption) (*v2.RotateCredentialResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) CreateResource(ctx context.Context, request *v2.CreateResourceRequest, opts ...grpc.CallOption) (*v2.CreateResourceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) DeleteResource(ctx context.Context, request *v2.DeleteResourceRequest, opts ...grpc.CallOption) (*v2.DeleteResourceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) DeleteResourceV2(ctx context.Context, request *v2.DeleteResourceV2Request, opts ...grpc.CallOption) (*v2.DeleteResourceV2Response, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) GetActionSchema(ctx context.Context, request *v2.GetActionSchemaRequest, opts ...grpc.CallOption) (*v2.GetActionSchemaResponse, error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
+}
+
+func (n *emptyConnector) GetActionStatus(ctx context.Context, request *v2.GetActionStatusRequest, opts ...grpc.CallOption) (*v2.GetActionStatusResponse, error) {
+ return nil, status.Errorf(codes.NotFound, "empty connector")
+}
+
+func (n *emptyConnector) InvokeAction(ctx context.Context, request *v2.InvokeActionRequest, opts ...grpc.CallOption) (*v2.InvokeActionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "empty connector")
+}
+
+func (n *emptyConnector) ListActionSchemas(ctx context.Context, request *v2.ListActionSchemasRequest, opts ...grpc.CallOption) (*v2.ListActionSchemasResponse, error) {
+ return &v2.ListActionSchemasResponse{
+ Schemas: []*v2.BatonActionSchema{},
+ }, nil
+}
+
+func (n *emptyConnector) ListEvents(ctx context.Context, request *v2.ListEventsRequest, opts ...grpc.CallOption) (*v2.ListEventsResponse, error) {
+ return &v2.ListEventsResponse{
+ Events: []*v2.Event{},
+ }, nil
+}
+
+func (n *emptyConnector) ListEventFeeds(ctx context.Context, request *v2.ListEventFeedsRequest, opts ...grpc.CallOption) (*v2.ListEventFeedsResponse, error) {
+ return &v2.ListEventFeedsResponse{
+ List: []*v2.EventFeedMetadata{},
+ }, nil
+}
+
// NewEmptyConnector returns a new emptyConnector.
func NewEmptyConnector() (*emptyConnector, error) {
return &emptyConnector{}, nil
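The empty connector now satisfies the connector client interface (note the grpc.CallOption variadics), returning empty lists for read-style calls and NotFound/Unimplemented for everything else. A quick illustration of that behavior:

```go
package main

import (
	"context"
	"fmt"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/sdk"
	"google.golang.org/grpc/status"
)

func main() {
	ctx := context.Background()
	conn, _ := sdk.NewEmptyConnector()

	// Read-style calls succeed with empty lists...
	rts, _ := conn.ListResourceTypes(ctx, &v2.ResourceTypesServiceListResourceTypesRequest{})
	fmt.Println("resource types:", len(rts.List))

	// ...while mutating or lookup calls surface gRPC errors instead.
	_, err := conn.Grant(ctx, &v2.GrantManagerServiceGrantRequest{})
	fmt.Println("grant:", status.Code(err)) // Unimplemented
}
```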
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go
index 38c11253..b2d2da28 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/sdk/version.go
@@ -1,3 +1,3 @@
package sdk
-const Version = "v0.2.92"
+const Version = "v0.3.8"
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go
index 35336394..e8dc6fe6 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/state.go
@@ -29,6 +29,8 @@ type State interface {
SetNeedsExpansion()
HasExternalResourcesGrants() bool
SetHasExternalResourcesGrants()
+ ShouldFetchRelatedResources() bool
+ SetShouldFetchRelatedResources()
}
// ActionOp represents a sync operation.
@@ -53,6 +55,8 @@ func (s ActionOp) String() string {
return "fetch-assets"
case SyncGrantExpansionOp:
return "grant-expansion"
+ case SyncTargetedResourceOp:
+ return "targeted-resource-sync"
default:
return "unknown"
}
@@ -94,6 +98,8 @@ func newActionOp(str string) ActionOp {
return SyncGrantExpansionOp
case SyncExternalResourcesOp.String():
return SyncExternalResourcesOp
+ case SyncTargetedResourceOp.String():
+ return SyncTargetedResourceOp
default:
return UnknownOp
}
@@ -110,6 +116,7 @@ const (
SyncExternalResourcesOp
SyncAssetsOp
SyncGrantExpansionOp
+ SyncTargetedResourceOp
)
// Action stores the current operation, page token, and optional fields for which resource is being worked with.
@@ -124,22 +131,24 @@ type Action struct {
// state is an object used for tracking the current status of a connector sync. It operates like a stack.
type state struct {
- mtx sync.RWMutex
- actions []Action
- currentAction *Action
- entitlementGraph *expand.EntitlementGraph
- needsExpansion bool
- hasExternalResourceGrants bool
+ mtx sync.RWMutex
+ actions []Action
+ currentAction *Action
+ entitlementGraph *expand.EntitlementGraph
+ needsExpansion bool
+ hasExternalResourceGrants bool
+ shouldFetchRelatedResources bool
}
// serializedToken is used to serialize the token to JSON. This separate object is used to avoid having exported fields
// on the object used externally. We should interface this, probably.
type serializedToken struct {
- Actions []Action `json:"actions"`
- CurrentAction *Action `json:"current_action"`
- NeedsExpansion bool `json:"needs_expansion"`
- EntitlementGraph *expand.EntitlementGraph `json:"entitlement_graph"`
- HasExternalResourceGrants bool `json:"has_external_resource_grants"`
+ Actions []Action `json:"actions"`
+ CurrentAction *Action `json:"current_action"`
+ NeedsExpansion bool `json:"needs_expansion"`
+ EntitlementGraph *expand.EntitlementGraph `json:"entitlement_graph"`
+ HasExternalResourceGrants bool `json:"has_external_resource_grants"`
+ ShouldFetchRelatedResources bool `json:"should_fetch_related_resources"`
}
// push adds a new action to the stack. If there is no current state, the action is directly set to current, else
@@ -281,6 +290,14 @@ func (st *state) SetHasExternalResourcesGrants() {
st.hasExternalResourceGrants = true
}
+func (st *state) ShouldFetchRelatedResources() bool {
+ return st.shouldFetchRelatedResources
+}
+
+func (st *state) SetShouldFetchRelatedResources() {
+ st.shouldFetchRelatedResources = true
+}
+
// PageToken returns the page token for the current action.
func (st *state) PageToken(ctx context.Context) string {
c := st.Current()
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go
index 2ed36fe0..c7c82247 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/sync/syncer.go
@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
- "math"
"os"
"slices"
"strconv"
@@ -16,6 +15,7 @@ import (
"github.com/conductorone/baton-sdk/pkg/bid"
"github.com/conductorone/baton-sdk/pkg/dotc1z"
+ "github.com/conductorone/baton-sdk/pkg/retry"
"github.com/conductorone/baton-sdk/pkg/sync/expand"
"github.com/conductorone/baton-sdk/pkg/types/entitlement"
batonGrant "github.com/conductorone/baton-sdk/pkg/types/grant"
@@ -203,8 +203,10 @@ type syncer struct {
skipFullSync bool
lastCheckPointTime time.Time
counts *ProgressCounts
-
- skipEGForResourceType map[string]bool
+ targetedSyncResourceIDs []string
+ onlyExpandGrants bool
+ syncID string
+ skipEGForResourceType map[string]bool
}
const minCheckpointInterval = 10 * time.Second
@@ -244,72 +246,61 @@ func (s *syncer) handleProgress(ctx context.Context, a *Action, c int) {
}
}
-var attempts = 0
-
-func shouldWaitAndRetry(ctx context.Context, err error) bool {
- ctx, span := tracer.Start(ctx, "syncer.shouldWaitAndRetry")
- defer span.End()
-
+func isWarning(ctx context.Context, err error) bool {
if err == nil {
- attempts = 0
- return true
- }
- if status.Code(err) != codes.Unavailable && status.Code(err) != codes.DeadlineExceeded {
return false
}
- attempts++
- l := ctxzap.Extract(ctx)
+ if status.Code(err) == codes.NotFound {
+ return true
+ }
- // use linear time by default
- var wait time.Duration = time.Duration(attempts) * time.Second
+ return false
+}
- // If error contains rate limit data, use that instead
- if st, ok := status.FromError(err); ok {
- details := st.Details()
- for _, detail := range details {
- if rlData, ok := detail.(*v2.RateLimitDescription); ok {
- waitResetAt := time.Until(rlData.ResetAt.AsTime())
- if waitResetAt <= 0 {
- continue
- }
- duration := time.Duration(rlData.Limit)
- if duration <= 0 {
- continue
- }
- waitResetAt /= duration
- // Round up to the nearest second to make sure we don't hit the rate limit again
- waitResetAt = time.Duration(math.Ceil(waitResetAt.Seconds())) * time.Second
- if waitResetAt > 0 {
- wait = waitResetAt
- break
- }
- }
+func (s *syncer) startOrResumeSync(ctx context.Context) (string, bool, error) {
+ // Sync resuming logic:
+ // If no targetedSyncResourceIDs, find the most recent sync and resume it (regardless of partial or full).
+ // If targetedSyncResourceIDs, start a new partial sync. Use the most recent completed full sync as the parent sync ID (if one exists).
+
+ if s.syncID != "" {
+ err := s.store.SetCurrentSync(ctx, s.syncID)
+ if err != nil {
+ return "", false, err
}
+ return s.syncID, false, nil
}
- l.Warn("retrying operation", zap.Error(err), zap.Duration("wait", wait))
-
- for {
- select {
- case <-time.After(wait):
- return true
- case <-ctx.Done():
- return false
+ var syncID string
+ var newSync bool
+ var err error
+ if len(s.targetedSyncResourceIDs) == 0 {
+ syncID, newSync, err = s.store.StartSync(ctx)
+ if err != nil {
+ return "", false, err
}
+ return syncID, newSync, nil
}
-}
-func isWarning(ctx context.Context, err error) bool {
- if err == nil {
- return false
+ // Get most recent completed full sync if it exists
+ latestFullSyncResponse, err := s.store.GetLatestFinishedSync(ctx, &reader_v2.SyncsReaderServiceGetLatestFinishedSyncRequest{
+ SyncType: string(dotc1z.SyncTypeFull),
+ })
+ if err != nil {
+ return "", false, err
}
-
- if status.Code(err) == codes.NotFound {
- return true
+ var latestFullSyncId string
+ latestFullSync := latestFullSyncResponse.Sync
+ if latestFullSync != nil {
+ latestFullSyncId = latestFullSync.Id
+ }
+ syncID, err = s.store.StartNewSyncV2(ctx, "partial", latestFullSyncId)
+ if err != nil {
+ return "", false, err
}
+ newSync = true
- return false
+ return syncID, newSync, nil
}
// Sync starts the syncing process. The sync process is driven by the action stack that is part of the state object.
@@ -345,7 +336,17 @@ func (s *syncer) Sync(ctx context.Context) error {
return err
}
- syncID, newSync, err := s.store.StartSync(ctx)
+ // Validate any targeted resource IDs before starting a sync.
+ targetedResources := []*v2.Resource{}
+ for _, resourceID := range s.targetedSyncResourceIDs {
+ r, err := bid.ParseResourceBid(resourceID)
+ if err != nil {
+ return fmt.Errorf("error parsing resource id %s: %w", resourceID, err)
+ }
+ targetedResources = append(targetedResources, r)
+ }
+
+ syncID, newSync, err := s.startOrResumeSync(ctx)
if err != nil {
return err
}
@@ -370,6 +371,12 @@ func (s *syncer) Sync(ctx context.Context) error {
}
s.state = state
+ retryer := retry.NewRetryer(ctx, retry.RetryConfig{
+ MaxAttempts: 0,
+ InitialDelay: 1 * time.Second,
+ MaxDelay: 0,
+ })
+
var warnings []error
for s.state.Current() != nil {
err = s.Checkpoint(ctx, false)
@@ -401,12 +408,40 @@ func (s *syncer) Sync(ctx context.Context) error {
case InitOp:
s.state.FinishAction(ctx)
+ if len(targetedResources) > 0 {
+ for _, r := range targetedResources {
+ s.state.PushAction(ctx, Action{
+ Op: SyncTargetedResourceOp,
+ ResourceID: r.GetId().GetResource(),
+ ResourceTypeID: r.GetId().GetResourceType(),
+ ParentResourceID: r.GetParentResourceId().GetResource(),
+ ParentResourceTypeID: r.GetParentResourceId().GetResourceType(),
+ })
+ }
+ s.state.SetShouldFetchRelatedResources()
+ s.state.PushAction(ctx, Action{Op: SyncResourceTypesOp})
+ err = s.Checkpoint(ctx, true)
+ if err != nil {
+ return err
+ }
+ // Don't do grant expansion or external resources in partial syncs, as we likely lack related resources/entitlements/grants
+ continue
+ }
+
// FIXME(jirwin): Disabling syncing assets for now
// s.state.PushAction(ctx, Action{Op: SyncAssetsOp})
s.state.PushAction(ctx, Action{Op: SyncGrantExpansionOp})
if s.externalResourceReader != nil {
s.state.PushAction(ctx, Action{Op: SyncExternalResourcesOp})
}
+ if s.onlyExpandGrants {
+ s.state.SetNeedsExpansion()
+ err = s.Checkpoint(ctx, true)
+ if err != nil {
+ return err
+ }
+ continue
+ }
s.state.PushAction(ctx, Action{Op: SyncGrantsOp})
s.state.PushAction(ctx, Action{Op: SyncEntitlementsOp})
s.state.PushAction(ctx, Action{Op: SyncResourcesOp})
@@ -420,14 +455,27 @@ func (s *syncer) Sync(ctx context.Context) error {
case SyncResourceTypesOp:
err = s.SyncResourceTypes(ctx)
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
case SyncResourcesOp:
err = s.SyncResources(ctx)
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
+ return err
+ }
+ continue
+
+ case SyncTargetedResourceOp:
+ err = s.SyncTargetedResource(ctx)
+ if isWarning(ctx, err) {
+ l.Warn("skipping sync targeted resource action", zap.Any("stateAction", stateAction), zap.Error(err))
+ warnings = append(warnings, err)
+ s.state.FinishAction(ctx)
+ continue
+ }
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
@@ -440,7 +488,7 @@ func (s *syncer) Sync(ctx context.Context) error {
s.state.FinishAction(ctx)
continue
}
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
@@ -453,20 +501,20 @@ func (s *syncer) Sync(ctx context.Context) error {
s.state.FinishAction(ctx)
continue
}
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
case SyncExternalResourcesOp:
err = s.SyncExternalResources(ctx)
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
case SyncAssetsOp:
err = s.SyncAssets(ctx)
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
@@ -479,7 +527,7 @@ func (s *syncer) Sync(ctx context.Context) error {
}
err = s.SyncGrantExpansion(ctx)
- if !shouldWaitAndRetry(ctx, err) {
+ if !retryer.ShouldWaitAndRetry(ctx, err) {
return err
}
continue
@@ -630,6 +678,94 @@ func (s *syncer) getSubResources(ctx context.Context, parent *v2.Resource) error
return nil
}
+func (s *syncer) getResourceFromConnector(ctx context.Context, resourceID *v2.ResourceId, parentResourceID *v2.ResourceId) (*v2.Resource, error) {
+ ctx, span := tracer.Start(ctx, "syncer.getResource")
+ defer span.End()
+
+ resourceResp, err := s.connector.GetResource(ctx,
+ &v2.ResourceGetterServiceGetResourceRequest{
+ ResourceId: resourceID,
+ ParentResourceId: parentResourceID,
+ },
+ )
+ if err == nil {
+ return resourceResp.Resource, nil
+ }
+ l := ctxzap.Extract(ctx)
+ if status.Code(err) == codes.NotFound {
+ l.Warn("skipping resource due to not found", zap.String("resource_id", resourceID.GetResource()), zap.String("resource_type_id", resourceID.GetResourceType()))
+ return nil, nil
+ }
+ if status.Code(err) == codes.Unimplemented {
+ l.Warn("skipping resource due to unimplemented connector", zap.String("resource_id", resourceID.GetResource()), zap.String("resource_type_id", resourceID.GetResourceType()))
+ return nil, nil
+ }
+ return nil, err
+}
+
+func (s *syncer) SyncTargetedResource(ctx context.Context) error {
+ ctx, span := tracer.Start(ctx, "syncer.SyncTargetedResource")
+ defer span.End()
+
+ resourceID := s.state.ResourceID(ctx)
+ resourceTypeID := s.state.ResourceTypeID(ctx)
+ if resourceID == "" || resourceTypeID == "" {
+ return errors.New("cannot get resource without a resource target")
+ }
+
+ parentResourceID := s.state.ParentResourceID(ctx)
+ parentResourceTypeID := s.state.ParentResourceTypeID(ctx)
+ var prID *v2.ResourceId
+ if parentResourceID != "" && parentResourceTypeID != "" {
+ prID = &v2.ResourceId{
+ ResourceType: parentResourceTypeID,
+ Resource: parentResourceID,
+ }
+ }
+
+ resource, err := s.getResourceFromConnector(ctx, &v2.ResourceId{
+ ResourceType: resourceTypeID,
+ Resource: resourceID,
+ }, prID)
+ if err != nil {
+ return err
+ }
+
+ // If getResource encounters not found or unimplemented, it returns a nil resource and nil error.
+ if resource == nil {
+ s.state.FinishAction(ctx)
+ return nil
+ }
+
+ // Save our resource in the DB
+ if err := s.store.PutResources(ctx, resource); err != nil {
+ return err
+ }
+
+ s.state.FinishAction(ctx)
+
+ // Actions happen in reverse order. We want to sync child resources, then entitlements, then grants
+
+ s.state.PushAction(ctx, Action{
+ Op: SyncGrantsOp,
+ ResourceTypeID: resourceTypeID,
+ ResourceID: resourceID,
+ })
+
+ s.state.PushAction(ctx, Action{
+ Op: SyncEntitlementsOp,
+ ResourceTypeID: resourceTypeID,
+ ResourceID: resourceID,
+ })
+
+ err = s.getSubResources(ctx, resource)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
// SyncResources handles fetching all of the resources from the connector given the provided resource types. For each
// resource, we gather any child resource types it may emit, and traverse the resource tree.
func (s *syncer) SyncResources(ctx context.Context) error {
@@ -659,7 +795,14 @@ func (s *syncer) SyncResources(ctx context.Context) error {
}
for _, rt := range resp.List {
- s.state.PushAction(ctx, Action{Op: SyncResourcesOp, ResourceTypeID: rt.Id})
+ action := Action{Op: SyncResourcesOp, ResourceTypeID: rt.Id}
+ // If this request specified a parent resource, only queue up syncing resources for children of the parent resource
+ if s.state.Current().ParentResourceTypeID != "" && s.state.Current().ParentResourceID != "" {
+ action.ParentResourceID = s.state.Current().ParentResourceID
+ action.ParentResourceTypeID = s.state.Current().ParentResourceTypeID
+ }
+
+ s.state.PushAction(ctx, action)
}
return nil
@@ -1416,6 +1559,7 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou
// We want to process any grants from the previous sync first so that if there is a conflict, the newer data takes precedence
grants = append(grants, resp.List...)
+ l := ctxzap.Extract(ctx)
for _, grant := range grants {
grantAnnos := annotations.Annotations(grant.GetAnnotations())
if grantAnnos.Contains(&v2.GrantExpandable{}) {
@@ -1424,6 +1568,57 @@ func (s *syncer) syncGrantsForResource(ctx context.Context, resourceID *v2.Resou
if grantAnnos.ContainsAny(&v2.ExternalResourceMatchAll{}, &v2.ExternalResourceMatch{}, &v2.ExternalResourceMatchID{}) {
s.state.SetHasExternalResourcesGrants()
}
+
+ if !s.state.ShouldFetchRelatedResources() {
+ continue
+ }
+ // Some connectors emit grants for resources other than the one being synced. During a partial sync, check whether the entitlement's resource is already stored and fetch it from the connector if not.
+ entitlementResource := grant.GetEntitlement().GetResource()
+ _, err := s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{
+ ResourceId: entitlementResource.GetId(),
+ })
+ if err != nil {
+ if !errors.Is(err, sql.ErrNoRows) {
+ return err
+ }
+
+ erId := entitlementResource.GetId()
+ prId := entitlementResource.GetParentResourceId()
+ resource, err := s.getResourceFromConnector(ctx, erId, prId)
+ if err != nil {
+ l.Error("error fetching entitlement resource", zap.Error(err))
+ return err
+ }
+ if resource == nil {
+ continue
+ }
+ if err := s.store.PutResources(ctx, resource); err != nil {
+ return err
+ }
+ }
+
+ principalResource := grant.GetPrincipal()
+ _, err = s.store.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{
+ ResourceId: principalResource.GetId(),
+ })
+ if err != nil {
+ if !errors.Is(err, sql.ErrNoRows) {
+ return err
+ }
+
+ // Principal resource is not in the DB, so try to fetch it from the connector.
+ resource, err := s.getResourceFromConnector(ctx, principalResource.GetId(), principalResource.GetParentResourceId())
+ if err != nil {
+ l.Error("error fetching principal resource", zap.Error(err))
+ return err
+ }
+ if resource == nil {
+ continue
+ }
+ if err := s.store.PutResources(ctx, resource); err != nil {
+ return err
+ }
+ }
}
err = s.store.PutGrants(ctx, grants...)
if err != nil {
@@ -1543,6 +1738,14 @@ func (s *syncer) SyncExternalResourcesWithGrantToEntitlement(ctx context.Context
for _, resourceId := range resourceIDs {
resourceResp, err := s.externalResourceReader.GetResource(ctx, &reader_v2.ResourcesReaderServiceGetResourceRequest{ResourceId: resourceId})
if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ l.Debug(
+ "resource was not found in external sync",
+ zap.String("resource_id", resourceId.Resource),
+ zap.String("resource_type_id", resourceId.ResourceType),
+ )
+ continue
+ }
return err
}
resourceVal := resourceResp.GetResource()
@@ -1983,7 +2186,7 @@ func (s *syncer) processGrantsWithExternalPrincipals(ctx context.Context, princi
}
}
profileVal, ok := resource.GetProfileStringValue(userTrait.Profile, matchExternalResource.Key)
- if ok && profileVal == matchExternalResource.Value {
+ if ok && strings.EqualFold(profileVal, matchExternalResource.Value) {
newGrant := newGrantForExternalPrincipal(grant, userPrincipal)
expandedGrants = append(expandedGrants, newGrant)
}
@@ -1996,7 +2199,7 @@ func (s *syncer) processGrantsWithExternalPrincipals(ctx context.Context, princi
continue
}
profileVal, ok := resource.GetProfileStringValue(groupTrait.Profile, matchExternalResource.Key)
- if ok && profileVal == matchExternalResource.Value {
+ if ok && strings.EqualFold(profileVal, matchExternalResource.Value) {
newGrant := newGrantForExternalPrincipal(grant, groupPrincipal)
newGrantAnnos := annotations.Annotations(newGrant.Annotations)
@@ -2510,6 +2713,24 @@ func WithExternalResourceEntitlementIdFilter(entitlementId string) SyncOpt {
}
}
+func WithTargetedSyncResourceIDs(resourceIDs []string) SyncOpt {
+ return func(s *syncer) {
+ s.targetedSyncResourceIDs = resourceIDs
+ }
+}
+
+func WithOnlyExpandGrants() SyncOpt {
+ return func(s *syncer) {
+ s.onlyExpandGrants = true
+ }
+}
+
+func WithSyncID(syncID string) SyncOpt {
+ return func(s *syncer) {
+ s.syncID = syncID
+ }
+}
+
// NewSyncer returns a new syncer object.
func NewSyncer(ctx context.Context, c types.ConnectorClient, opts ...SyncOpt) (Syncer, error) {
s := &syncer{
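The new sync options compose as shown below. This is a sketch under the assumptions that the caller already has a types.ConnectorClient and that the resource ID is in whatever format bid.ParseResourceBid accepts (the value shown is a placeholder).

```go
package main

import (
	"context"
	"log"

	"github.com/conductorone/baton-sdk/pkg/sync"
	"github.com/conductorone/baton-sdk/pkg/types"
)

// runTargetedSync sketches how the new options compose: a partial sync that
// only touches the listed resources, written to the given c1z file.
func runTargetedSync(ctx context.Context, client types.ConnectorClient) error {
	syncer, err := sync.NewSyncer(ctx, client,
		sync.WithC1ZPath("sync.c1z"),
		sync.WithTargetedSyncResourceIDs([]string{"<resource-bid>"}), // placeholder ID
	)
	if err != nil {
		return err
	}
	if err := syncer.Sync(ctx); err != nil {
		return err
	}
	return syncer.Close(ctx)
}

func main() {
	log.Println("see runTargetedSync; constructing a real types.ConnectorClient is connector-specific")
}
```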
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go
new file mode 100644
index 00000000..535c8ffe
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/compactor.go
@@ -0,0 +1,254 @@
+package synccompactor
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+
+ reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2"
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+ c1zmanager "github.com/conductorone/baton-sdk/pkg/dotc1z/manager"
+ "github.com/conductorone/baton-sdk/pkg/sdk"
+ "github.com/conductorone/baton-sdk/pkg/sync"
+ sync_compactor "github.com/conductorone/baton-sdk/pkg/synccompactor/naive"
+ "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+ "go.uber.org/zap"
+)
+
+type Compactor struct {
+ entries []*CompactableSync
+
+ tmpDir string
+ destDir string
+}
+
+type CompactableSync struct {
+ FilePath string
+ SyncID string
+}
+
+var ErrNotEnoughFilesToCompact = errors.New("must provide two or more files to compact")
+
+type Option func(*Compactor)
+
+// WithTmpDir sets the working directory where files will be created and edited during compaction.
+// If not provided, the system temporary directory will be used.
+func WithTmpDir(tempDir string) Option {
+ return func(c *Compactor) {
+ c.tmpDir = tempDir
+ }
+}
+
+func NewCompactor(ctx context.Context, outputDir string, compactableSyncs []*CompactableSync, opts ...Option) (*Compactor, func() error, error) {
+ if len(compactableSyncs) < 2 {
+ return nil, nil, ErrNotEnoughFilesToCompact
+ }
+
+ c := &Compactor{entries: compactableSyncs, destDir: outputDir}
+ for _, opt := range opts {
+ opt(c)
+ }
+
+ // If no tmpDir was provided, fall back to the system temp dir.
+ if c.tmpDir == "" {
+ c.tmpDir = os.TempDir()
+ }
+ tmpDir, err := os.MkdirTemp(c.tmpDir, "baton-sync-compactor-")
+ if err != nil {
+ return nil, nil, err
+ }
+ c.tmpDir = tmpDir
+
+ cleanup := func() error {
+ if err := os.RemoveAll(c.tmpDir); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return c, cleanup, nil
+}
+
+func (c *Compactor) Compact(ctx context.Context) (*CompactableSync, error) {
+ if len(c.entries) < 2 {
+ return nil, nil
+ }
+
+ base := c.entries[0]
+ for i := 1; i < len(c.entries); i++ {
+ applied := c.entries[i]
+
+ compactable, err := c.doOneCompaction(ctx, base, applied)
+ if err != nil {
+ return nil, err
+ }
+
+ base = compactable
+ }
+
+ l := ctxzap.Extract(ctx)
+ // Grant expansion doesn't use the connector interface at all, so giving syncer an empty connector is safe... for now.
+ // If that ever changes, we should implement a file connector that is a wrapper around the reader.
+ emptyConnector, err := sdk.NewEmptyConnector()
+ if err != nil {
+ l.Error("error creating empty connector", zap.Error(err))
+ return nil, err
+ }
+
+ // Use syncer to expand grants.
+ // TODO: Handle external resources.
+ syncer, err := sync.NewSyncer(
+ ctx,
+ emptyConnector,
+ sync.WithC1ZPath(base.FilePath),
+ sync.WithSyncID(base.SyncID),
+ sync.WithOnlyExpandGrants(),
+ )
+ if err != nil {
+ l.Error("error creating syncer", zap.Error(err))
+ return nil, err
+ }
+
+ if err := syncer.Sync(ctx); err != nil {
+ l.Error("error syncing with grant expansion", zap.Error(err))
+ return nil, err
+ }
+ if err := syncer.Close(ctx); err != nil {
+ l.Error("error closing syncer", zap.Error(err))
+ return nil, err
+ }
+
+ // Copy the final compacted file to the destination dir
+ finalPath := path.Join(c.destDir, fmt.Sprintf("compacted-%s.c1z", base.SyncID))
+ if err := cpFile(base.FilePath, finalPath); err != nil {
+ return nil, err
+ }
+
+ if !filepath.IsAbs(finalPath) {
+ abs, err := filepath.Abs(finalPath)
+ if err != nil {
+ return nil, err
+ }
+ finalPath = abs
+ }
+ return &CompactableSync{FilePath: finalPath, SyncID: base.SyncID}, nil
+}
+
+func cpFile(sourcePath string, destPath string) error {
+ source, err := os.Open(sourcePath)
+ if err != nil {
+ return fmt.Errorf("failed to open source file: %w", err)
+ }
+ defer source.Close()
+
+ destination, err := os.Create(destPath)
+ if err != nil {
+ return fmt.Errorf("failed to create destination file: %w", err)
+ }
+ defer destination.Close()
+
+ _, err = io.Copy(destination, source)
+ if err != nil {
+ return fmt.Errorf("failed to copy file: %w", err)
+ }
+
+ return nil
+}
+
+func getLatestObjects(ctx context.Context, info *CompactableSync) (*reader_v2.SyncRun, *dotc1z.C1File, c1zmanager.Manager, func(), error) {
+ baseC1Z, err := c1zmanager.New(ctx, info.FilePath)
+ if err != nil {
+ return nil, nil, nil, nil, err
+ }
+
+ cleanup := func() {
+ _ = baseC1Z.Close(ctx)
+ }
+
+ baseFile, err := baseC1Z.LoadC1Z(ctx)
+ if err != nil {
+ return nil, nil, nil, cleanup, err
+ }
+
+ cleanup = func() {
+ _ = baseFile.Close()
+ _ = baseC1Z.Close(ctx)
+ }
+
+ latestAppliedSync, err := baseFile.GetSync(ctx, &reader_v2.SyncsReaderServiceGetSyncRequest{
+ SyncId: info.SyncID,
+ Annotations: nil,
+ })
+ if err != nil {
+ return nil, nil, nil, cleanup, err
+ }
+
+ return latestAppliedSync.Sync, baseFile, baseC1Z, cleanup, nil
+}
+
+func (c *Compactor) doOneCompaction(ctx context.Context, base *CompactableSync, applied *CompactableSync) (*CompactableSync, error) {
+ l := ctxzap.Extract(ctx)
+ l.Info(
+ "running compaction",
+ zap.String("base_file", base.FilePath),
+ zap.String("base_sync", base.SyncID),
+ zap.String("applied_file", applied.FilePath),
+ zap.String("applied_sync", applied.SyncID),
+ zap.String("tmp_dir", c.tmpDir),
+ )
+
+ opts := []dotc1z.C1ZOption{
+ dotc1z.WithPragma("journal_mode", "WAL"),
+ dotc1z.WithTmpDir(c.tmpDir),
+ }
+
+ fileName := fmt.Sprintf("compacted-%s-%s.c1z", base.SyncID, applied.SyncID)
+ newFile, err := dotc1z.NewC1ZFile(ctx, path.Join(c.tmpDir, fileName), opts...)
+ if err != nil {
+ l.Error("doOneCompaction failed: could not create c1z file", zap.Error(err))
+ return nil, err
+ }
+ defer func() { _ = newFile.Close() }()
+
+ newSync, err := newFile.StartNewSyncV2(ctx, string(dotc1z.SyncTypeFull), "")
+ if err != nil {
+ return nil, err
+ }
+
+ _, baseFile, _, cleanupBase, err := getLatestObjects(ctx, base)
+ defer cleanupBase()
+ if err != nil {
+ return nil, err
+ }
+
+ _, appliedFile, _, cleanupApplied, err := getLatestObjects(ctx, applied)
+ defer cleanupApplied()
+ if err != nil {
+ return nil, err
+ }
+
+ runner := sync_compactor.NewNaiveCompactor(baseFile, appliedFile, newFile)
+
+ if err := runner.Compact(ctx); err != nil {
+ l.Error("error running compaction", zap.Error(err))
+ return nil, err
+ }
+
+ if err := newFile.EndSync(ctx); err != nil {
+ return nil, err
+ }
+
+ outputFilepath, err := newFile.OutputFilepath()
+ if err != nil {
+ return nil, err
+ }
+
+ return &CompactableSync{
+ FilePath: outputFilepath,
+ SyncID: newSync,
+ }, nil
+}
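End to end, the compactor takes two or more (file, sync ID) pairs, folds each into the previous result, re-runs grant expansion, and writes the final c1z to the output directory. A hedged usage sketch; the file paths and sync IDs are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/conductorone/baton-sdk/pkg/synccompactor"
)

func main() {
	ctx := context.Background()

	// File paths and sync IDs are placeholders. Order matters: each entry is
	// applied on top of the result of the previous compaction.
	inputs := []*synccompactor.CompactableSync{
		{FilePath: "full.c1z", SyncID: "<full-sync-id>"},
		{FilePath: "partial.c1z", SyncID: "<partial-sync-id>"},
	}

	compactor, cleanup, err := synccompactor.NewCompactor(ctx, "./out", inputs,
		synccompactor.WithTmpDir(os.TempDir()), // optional; this is also the default
	)
	if err != nil {
		log.Fatal(err) // e.g. ErrNotEnoughFilesToCompact for fewer than two inputs
	}
	defer func() { _ = cleanup() }()

	result, err := compactor.Compact(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("compacted c1z written to %s (sync %s)", result.FilePath, result.SyncID)
}
```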
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go
new file mode 100644
index 00000000..7e4ae6e9
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive.go
@@ -0,0 +1,88 @@
+package naive
+
+import (
+ "context"
+
+ "github.com/conductorone/baton-sdk/pkg/dotc1z"
+ "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+ "go.uber.org/zap"
+ "google.golang.org/protobuf/proto"
+)
+
+func NewNaiveCompactor(base *dotc1z.C1File, applied *dotc1z.C1File, dest *dotc1z.C1File) *Compactor {
+ return &Compactor{
+ base: base,
+ applied: applied,
+ dest: dest,
+ }
+}
+
+type Compactor struct {
+ base *dotc1z.C1File
+ applied *dotc1z.C1File
+ dest *dotc1z.C1File
+}
+
+func (n *Compactor) Compact(ctx context.Context) error {
+ if err := n.processResourceTypes(ctx); err != nil {
+ return err
+ }
+ if err := n.processResources(ctx); err != nil {
+ return err
+ }
+ if err := n.processEntitlements(ctx); err != nil {
+ return err
+ }
+ if err := n.processGrants(ctx); err != nil {
+ return err
+ }
+ return nil
+}
+
+func naiveCompact[T proto.Message, REQ listRequest, RESP listResponse[T]](
+ ctx context.Context,
+ base listFunc[T, REQ, RESP],
+ applied listFunc[T, REQ, RESP],
+ save func(context.Context, ...T) error,
+) error {
+ var t T
+ l := ctxzap.Extract(ctx)
+ l.Info("naive compaction: compacting objects", zap.String("object_type", string(t.ProtoReflect().Descriptor().FullName())))
+ // List all objects from the base file and save them in the destination file
+ if err := listAllObjects(ctx, base, func(items []T) (bool, error) {
+ if err := save(ctx, items...); err != nil {
+ return false, err
+ }
+ return true, nil
+ }); err != nil {
+ return err
+ }
+
+ // Then list all objects from the applied file and save them in the destination file, overwriting ones with the same external_id
+ if err := listAllObjects(ctx, applied, func(items []T) (bool, error) {
+ if err := save(ctx, items...); err != nil {
+ return false, err
+ }
+ return true, nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (n *Compactor) processResourceTypes(ctx context.Context) error {
+ return naiveCompact(ctx, n.base.ListResourceTypes, n.applied.ListResourceTypes, n.dest.PutResourceTypesIfNewer)
+}
+
+func (n *Compactor) processResources(ctx context.Context) error {
+ return naiveCompact(ctx, n.base.ListResources, n.applied.ListResources, n.dest.PutResourcesIfNewer)
+}
+
+func (n *Compactor) processGrants(ctx context.Context) error {
+ return naiveCompact(ctx, n.base.ListGrants, n.applied.ListGrants, n.dest.PutGrantsIfNewer)
+}
+
+func (n *Compactor) processEntitlements(ctx context.Context) error {
+ return naiveCompact(ctx, n.base.ListEntitlements, n.applied.ListEntitlements, n.dest.PutEntitlementsIfNewer)
+}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go
new file mode 100644
index 00000000..cc4f8064
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/synccompactor/naive/naive_unroll.go
@@ -0,0 +1,98 @@
+package naive
+
+import (
+ "context"
+ "reflect"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+type listRequest interface {
+ proto.Message
+ GetPageSize() uint32
+ GetPageToken() string
+ GetAnnotations() []*anypb.Any
+}
+
+type listResponse[T proto.Message] interface {
+ GetNextPageToken() string
+ GetAnnotations() []*anypb.Any
+ GetList() []T
+}
+
+// createRequest creates a new request object of type REQ using reflection.
+func createRequest[REQ listRequest]() REQ {
+ var r REQ
+ baseType := reflect.TypeOf(r).Elem()
+ pointerToInitializedVal := reflect.New(baseType)
+ return pointerToInitializedVal.Interface().(REQ)
+}
+
+// setFieldIfValid sets a field in a struct if it exists and can be set.
+func setFieldIfValid(obj interface{}, fieldName string, setValue func(reflect.Value)) {
+ val := reflect.ValueOf(obj)
+ if val.Kind() != reflect.Ptr || val.IsNil() {
+ return
+ }
+
+ field := val.Elem().FieldByName(fieldName)
+ if field.IsValid() && field.CanSet() {
+ setValue(field)
+ }
+}
+
+// setPageSize sets the PageSize field in a request to the specified value.
+func setPageSize(req listRequest, size uint64) {
+ setFieldIfValid(req, "PageSize", func(field reflect.Value) {
+ field.SetUint(size)
+ })
+}
+
+// setPageToken sets the PageToken field in a request to the specified token.
+func setPageToken(req listRequest, token string) {
+ setFieldIfValid(req, "PageToken", func(field reflect.Value) {
+ field.SetString(token)
+ })
+}
+
+type listFunc[T proto.Message, REQ listRequest, RESP listResponse[T]] func(context.Context, REQ) (RESP, error)
+
+func listAllObjects[T proto.Message, REQ listRequest, RESP listResponse[T]](ctx context.Context, list listFunc[T, REQ, RESP], cb func(items []T) (bool, error)) error {
+ // Create a new request using reflection
+ req := createRequest[REQ]()
+
+ // Set initial page size
+ setPageSize(req, 100) // Set a reasonable default page size
+
+ var nextPageToken string
+ for {
+ // Set the page token for the current request if needed
+ if nextPageToken != "" {
+ setPageToken(req, nextPageToken)
+ }
+
+ // Call the list function with the current request
+ resp, err := list(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ // Collect the results
+ shouldContinue, err := cb(resp.GetList())
+ if err != nil {
+ return err
+ }
+ if !shouldContinue {
+ return nil
+ }
+
+ // Check if there are more pages
+ nextPageToken = resp.GetNextPageToken()
+ if nextPageToken == "" || len(resp.GetList()) == 0 {
+ break // No more pages
+ }
+ }
+
+ return nil
+}
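Because these helpers are generic over the request/response protos, instantiation is driven entirely by type inference. Below is a hypothetical in-package helper (not part of the SDK) showing how listAllObjects is typically wired up, assuming C1File.ListResources has the usual paged list signature used elsewhere in this diff.

```go
package naive

import (
	"context"

	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/dotc1z"
)

// countResources is a hypothetical helper (not part of the SDK) showing how
// listAllObjects is instantiated: REQ and RESP are inferred from the C1File
// list method, and T from the callback's element type.
func countResources(ctx context.Context, f *dotc1z.C1File) (int, error) {
	total := 0
	err := listAllObjects(ctx, f.ListResources, func(items []*v2.Resource) (bool, error) {
		total += len(items)
		return true, nil // returning false would stop paging early
	})
	return total, err
}
```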
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go
index c9690ae1..0bec6a27 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/full_sync.go
@@ -32,6 +32,7 @@ type fullSyncTaskHandler struct {
skipFullSync bool
externalResourceC1ZPath string
externalResourceEntitlementIdFilter string
+ targetedSyncResourceIDs []string
}
func (c *fullSyncTaskHandler) sync(ctx context.Context, c1zPath string) error {
@@ -57,6 +58,10 @@ func (c *fullSyncTaskHandler) sync(ctx context.Context, c1zPath string) error {
syncOpts = append(syncOpts, sdkSync.WithSkipFullSync())
}
+ if len(c.targetedSyncResourceIDs) > 0 {
+ syncOpts = append(syncOpts, sdkSync.WithTargetedSyncResourceIDs(c.targetedSyncResourceIDs))
+ }
+
syncer, err := sdkSync.NewSyncer(ctx, c.helpers.ConnectorClient(), syncOpts...)
if err != nil {
l.Error("failed to create syncer", zap.Error(err))
@@ -156,13 +161,21 @@ func (c *fullSyncTaskHandler) HandleTask(ctx context.Context) error {
return c.helpers.FinishTask(ctx, nil, nil, nil)
}
-func newFullSyncTaskHandler(task *v1.Task, helpers fullSyncHelpers, skipFullSync bool, externalResourceC1ZPath string, externalResourceEntitlementIdFilter string) tasks.TaskHandler {
+func newFullSyncTaskHandler(
+ task *v1.Task,
+ helpers fullSyncHelpers,
+ skipFullSync bool,
+ externalResourceC1ZPath string,
+ externalResourceEntitlementIdFilter string,
+ targetedSyncResourceIDs []string,
+) tasks.TaskHandler {
return &fullSyncTaskHandler{
task: task,
helpers: helpers,
skipFullSync: skipFullSync,
externalResourceC1ZPath: externalResourceC1ZPath,
externalResourceEntitlementIdFilter: externalResourceEntitlementIdFilter,
+ targetedSyncResourceIDs: targetedSyncResourceIDs,
}
}
@@ -172,15 +185,33 @@ func uploadDebugLogs(ctx context.Context, helper fullSyncHelpers) error {
l := ctxzap.Extract(ctx)
- debugfilelocation := filepath.Join(helper.TempDir(), "debug.log")
+ tempDir := helper.TempDir()
+ if tempDir == "" {
+ wd, err := os.Getwd()
+ if err != nil {
+ l.Warn("unable to get the current working directory", zap.Error(err))
+ }
+ if wd != "" {
+ l.Warn("no temporal folder found on this system according to our sync helper,"+
+ " we may create files in the current working directory by mistake as a result",
+ zap.String("current working directory", wd))
+ } else {
+ l.Warn("no temporal folder found on this system according to our sync helper")
+ }
+ }
+ debugfilelocation := filepath.Join(tempDir, "debug.log")
_, err := os.Stat(debugfilelocation)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
+ switch {
+ case errors.Is(err, os.ErrNotExist):
l.Warn("debug log file does not exists", zap.Error(err))
- return nil
+ case errors.Is(err, os.ErrPermission):
+ l.Warn("debug log file cannot be stat'd due to lack of permissions", zap.Error(err))
+ default:
+ l.Warn("cannot stat debug log file", zap.Error(err))
}
- return err
+ return nil
} else {
debugfile, err := os.Open(debugfilelocation)
if err != nil {
@@ -190,7 +221,6 @@ func uploadDebugLogs(ctx context.Context, helper fullSyncHelpers) error {
l.Info("uploading debug logs", zap.String("file", debugfilelocation))
err = helper.Upload(ctx, debugfile)
-
if err != nil {
return err
}
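For reference, a minimal sketch of the same wiring from a caller's point of view: the targeted resource IDs are optional and only appended to the sync options when non-empty. The `SyncOpt` option type and `Syncer` return type names are assumptions here; only `NewSyncer`, `WithC1ZPath`, and `WithTargetedSyncResourceIDs` appear in the hunks above.

```go
package example

import (
	"context"

	sdkSync "github.com/conductorone/baton-sdk/pkg/sync"
	"github.com/conductorone/baton-sdk/pkg/types"
)

// newTargetedSyncer mirrors the sync() wiring above: targeted resource IDs
// are only added to the option list when present.
func newTargetedSyncer(ctx context.Context, cc types.ConnectorClient, c1zPath string, resourceIDs []string) (sdkSync.Syncer, error) {
	syncOpts := []sdkSync.SyncOpt{
		sdkSync.WithC1ZPath(c1zPath),
	}
	if len(resourceIDs) > 0 {
		// Restrict the sync to the listed resources instead of a full sync.
		syncOpts = append(syncOpts, sdkSync.WithTargetedSyncResourceIDs(resourceIDs))
	}
	return sdkSync.NewSyncer(ctx, cc, syncOpts...)
}
```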
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go
index 216d1320..ad3867e3 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/c1api/manager.go
@@ -52,6 +52,7 @@ type c1ApiTaskManager struct {
runnerShouldDebug bool
externalResourceC1Z string
externalResourceEntitlementIdFilter string
+ targetedSyncResourceIDs []string
}
// getHeartbeatInterval returns an appropriate heartbeat interval. If the interval is 0, it will return the default heartbeat interval.
@@ -246,7 +247,14 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types.
var handler tasks.TaskHandler
switch tasks.GetType(task) {
case taskTypes.FullSyncType:
- handler = newFullSyncTaskHandler(task, tHelpers, c.skipFullSync, c.externalResourceC1Z, c.externalResourceEntitlementIdFilter)
+ handler = newFullSyncTaskHandler(
+ task,
+ tHelpers,
+ c.skipFullSync,
+ c.externalResourceC1Z,
+ c.externalResourceEntitlementIdFilter,
+ c.targetedSyncResourceIDs,
+ )
case taskTypes.HelloType:
handler = newHelloTaskHandler(task, tHelpers)
case taskTypes.GrantType:
@@ -296,7 +304,8 @@ func (c *c1ApiTaskManager) Process(ctx context.Context, task *v1.Task, cc types.
func NewC1TaskManager(
ctx context.Context, clientID string, clientSecret string, tempDir string, skipFullSync bool,
- externalC1Z string, externalResourceEntitlementIdFilter string) (tasks.Manager, error) {
+ externalC1Z string, externalResourceEntitlementIdFilter string, targetedSyncResourceIDs []string,
+) (tasks.Manager, error) {
serviceClient, err := newServiceClient(ctx, clientID, clientSecret)
if err != nil {
return nil, err
@@ -308,5 +317,6 @@ func NewC1TaskManager(
skipFullSync: skipFullSync,
externalResourceC1Z: externalC1Z,
externalResourceEntitlementIdFilter: externalResourceEntitlementIdFilter,
+ targetedSyncResourceIDs: targetedSyncResourceIDs,
}, nil
}
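A sketch of the updated `NewC1TaskManager` call, based on the new signature above. Every argument value below is a placeholder for illustration only.

```go
package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/tasks"
	"github.com/conductorone/baton-sdk/pkg/tasks/c1api"
)

// newManager shows the new trailing targetedSyncResourceIDs parameter.
func newManager(ctx context.Context, clientID, clientSecret, tempDir string) (tasks.Manager, error) {
	return c1api.NewC1TaskManager(
		ctx,
		clientID,
		clientSecret,
		tempDir,
		false, // skipFullSync
		"",    // externalC1Z
		"",    // externalResourceEntitlementIdFilter
		[]string{"resource-id-1"}, // targetedSyncResourceIDs (illustrative)
	)
}
```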
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go
new file mode 100644
index 00000000..1153134f
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/compactor.go
@@ -0,0 +1,71 @@
+package local
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
+ "github.com/conductorone/baton-sdk/pkg/synccompactor"
+ "github.com/conductorone/baton-sdk/pkg/tasks"
+ "github.com/conductorone/baton-sdk/pkg/types"
+ "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+type localCompactor struct {
+ o sync.Once
+
+ compactableSyncs []*synccompactor.CompactableSync
+ outputPath string
+}
+
+func (m *localCompactor) GetTempDir() string {
+ return ""
+}
+
+func (m *localCompactor) ShouldDebug() bool {
+ return false
+}
+
+func (m *localCompactor) Next(ctx context.Context) (*v1.Task, time.Duration, error) {
+ var task *v1.Task
+ m.o.Do(func() {
+ task = &v1.Task{
+ TaskType: &v1.Task_CompactSyncs_{},
+ }
+ })
+ return task, 0, nil
+}
+
+func (m *localCompactor) Process(ctx context.Context, task *v1.Task, cc types.ConnectorClient) error {
+ ctx, span := tracer.Start(ctx, "localCompactor.Process", trace.WithNewRoot())
+ defer span.End()
+ log := ctxzap.Extract(ctx)
+
+ compactor, cleanup, err := synccompactor.NewCompactor(ctx, m.outputPath, m.compactableSyncs)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = cleanup()
+ }()
+
+ compacted, err := compactor.Compact(ctx)
+ if err != nil {
+ return err
+ }
+
+ log.Info("compacted file", zap.String("file_path", compacted.FilePath), zap.String("sync_id", compacted.SyncID))
+
+ return nil
+}
+
+// NewLocalCompactor returns a task manager that queues a sync compaction task.
+func NewLocalCompactor(ctx context.Context, outputPath string, compactableSyncs []*synccompactor.CompactableSync) tasks.Manager {
+ return &localCompactor{
+ compactableSyncs: compactableSyncs,
+ outputPath: outputPath,
+ }
+}
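The local task managers in this package are one-shot: the sync.Once guard makes Next return the task exactly once and nil afterwards. A hedged sketch of a driver loop, assuming the pkg/tasks Manager interface exposes Next and Process with the signatures implemented above (the function name runOnce is illustrative):

```go
package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/tasks"
	"github.com/conductorone/baton-sdk/pkg/types"
)

// runOnce drives a one-shot local task manager such as the compactor above:
// Next yields the task a single time, then returns nil and the loop exits.
func runOnce(ctx context.Context, m tasks.Manager, cc types.ConnectorClient) error {
	for {
		task, _, err := m.Next(ctx)
		if err != nil {
			return err
		}
		if task == nil {
			return nil // nothing left to do
		}
		if err := m.Process(ctx, task, cc); err != nil {
			return err
		}
	}
}
```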
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go
new file mode 100644
index 00000000..75212f8c
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/differ.go
@@ -0,0 +1,92 @@
+package local
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
+ c1zmanager "github.com/conductorone/baton-sdk/pkg/dotc1z/manager"
+ "github.com/conductorone/baton-sdk/pkg/tasks"
+ "github.com/conductorone/baton-sdk/pkg/types"
+ "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap"
+)
+
+type localDiffer struct {
+ dbPath string
+ o sync.Once
+
+ baseSyncID string
+ appliedSyncID string
+}
+
+func (m *localDiffer) GetTempDir() string {
+ return ""
+}
+
+func (m *localDiffer) ShouldDebug() bool {
+ return false
+}
+
+func (m *localDiffer) Next(ctx context.Context) (*v1.Task, time.Duration, error) {
+ var task *v1.Task
+ m.o.Do(func() {
+ task = &v1.Task{
+ TaskType: &v1.Task_CreateSyncDiff{},
+ }
+ })
+ return task, 0, nil
+}
+
+func (m *localDiffer) Process(ctx context.Context, task *v1.Task, cc types.ConnectorClient) error {
+ ctx, span := tracer.Start(ctx, "localDiffer.Process", trace.WithNewRoot())
+ defer span.End()
+ log := ctxzap.Extract(ctx)
+
+ if m.baseSyncID == "" || m.appliedSyncID == "" {
+ return errors.New("missing base sync ID or applied sync ID")
+ }
+
+ store, err := c1zmanager.New(ctx, m.dbPath)
+ if err != nil {
+ return err
+ }
+ file, err := store.LoadC1Z(ctx)
+ if err != nil {
+ return err
+ }
+
+ newSyncID, err := file.GenerateSyncDiff(ctx, m.baseSyncID, m.appliedSyncID)
+ if err != nil {
+ return err
+ }
+
+ if err := file.Close(); err != nil {
+ return err
+ }
+
+ if err := store.SaveC1Z(ctx); err != nil {
+ log.Error("failed to save diff", zap.Error(err))
+ return err
+ }
+ if err := store.Close(ctx); err != nil {
+ log.Error("failed to close store", zap.Error(err))
+ return err
+ }
+
+ log.Info("generated diff of syncs", zap.String("new_sync_id", newSyncID))
+
+ return nil
+}
+
+// NewDiffer returns a task manager that queues a sync diff task.
+func NewDiffer(ctx context.Context, dbPath string, baseSyncID string, appliedSyncID string) tasks.Manager {
+ return &localDiffer{
+ dbPath: dbPath,
+ baseSyncID: baseSyncID,
+ appliedSyncID: appliedSyncID,
+ }
+}
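A small sketch of constructing the differ; the c1z path and sync IDs are placeholders. The resulting manager can be driven by the same one-shot loop sketched after the compactor above.

```go
package example

import (
	"context"

	"github.com/conductorone/baton-sdk/pkg/tasks"
	"github.com/conductorone/baton-sdk/pkg/tasks/local"
)

// newDiffTask queues a diff between two syncs already present in the .c1z file.
func newDiffTask(ctx context.Context) tasks.Manager {
	return local.NewDiffer(ctx, "./sync.c1z", "base-sync-id", "applied-sync-id")
}
```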
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go
index 0415b4e6..18e04a4b 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/event_feed.go
@@ -6,17 +6,19 @@ import (
"sync"
"time"
- "go.opentelemetry.io/otel/trace"
- "google.golang.org/protobuf/encoding/protojson"
-
v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
"github.com/conductorone/baton-sdk/pkg/tasks"
"github.com/conductorone/baton-sdk/pkg/types"
+ "go.opentelemetry.io/otel/trace"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
type localEventFeed struct {
- o sync.Once
+ o sync.Once
+ feedId string
+ startAt time.Time
}
const EventsPerPageLocally = 100
@@ -33,7 +35,11 @@ func (m *localEventFeed) Next(ctx context.Context) (*v1.Task, time.Duration, err
var task *v1.Task
m.o.Do(func() {
task = &v1.Task{
- TaskType: &v1.Task_EventFeed{},
+ TaskType: &v1.Task_EventFeed{
+ EventFeed: &v1.Task_EventFeedTask{
+ StartAt: timestamppb.New(m.startAt),
+ },
+ },
}
})
return task, 0, nil
@@ -46,9 +52,10 @@ func (m *localEventFeed) Process(ctx context.Context, task *v1.Task, cc types.Co
var pageToken string
for {
resp, err := cc.ListEvents(ctx, &v2.ListEventsRequest{
- PageSize: EventsPerPageLocally,
- Cursor: pageToken,
- StartAt: task.GetEventFeed().GetStartAt(),
+ PageSize: EventsPerPageLocally,
+ Cursor: pageToken,
+ StartAt: task.GetEventFeed().GetStartAt(),
+ EventFeedId: m.feedId,
})
if err != nil {
return err
@@ -71,6 +78,9 @@ func (m *localEventFeed) Process(ctx context.Context, task *v1.Task, cc types.Co
}
// NewEventFeed returns a task manager that queues an event feed task.
-func NewEventFeed(ctx context.Context) tasks.Manager {
- return &localEventFeed{}
+func NewEventFeed(ctx context.Context, feedId string, startAt time.Time) tasks.Manager {
+ return &localEventFeed{
+ feedId: feedId,
+ startAt: startAt,
+ }
}
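A sketch of the updated constructor: the feed ID and start time are now supplied up front and carried into the EventFeedTask. The feed ID below is a placeholder.

```go
package example

import (
	"context"
	"time"

	"github.com/conductorone/baton-sdk/pkg/tasks"
	"github.com/conductorone/baton-sdk/pkg/tasks/local"
)

// newFeedTask replays a named event feed starting one hour in the past,
// using the updated NewEventFeed signature.
func newFeedTask(ctx context.Context) tasks.Manager {
	return local.NewEventFeed(ctx, "example-feed", time.Now().Add(-time.Hour))
}
```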
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go
index d6675f2b..d02a6d9a 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/local/syncer.go
@@ -20,6 +20,7 @@ type localSyncer struct {
tmpDir string
externalResourceC1Z string
externalResourceEntitlementIdFilter string
+ targetedSyncResourceIDs []string
}
type Option func(*localSyncer)
@@ -42,6 +43,12 @@ func WithExternalResourceEntitlementIdFilter(entitlementId string) Option {
}
}
+func WithTargetedSyncResourceIDs(resourceIDs []string) Option {
+ return func(m *localSyncer) {
+ m.targetedSyncResourceIDs = resourceIDs
+ }
+}
+
func (m *localSyncer) GetTempDir() string {
return ""
}
@@ -68,7 +75,9 @@ func (m *localSyncer) Process(ctx context.Context, task *v1.Task, cc types.Conne
sdkSync.WithC1ZPath(m.dbPath),
sdkSync.WithTmpDir(m.tmpDir),
sdkSync.WithExternalResourceC1ZPath(m.externalResourceC1Z),
- sdkSync.WithExternalResourceEntitlementIdFilter(m.externalResourceEntitlementIdFilter))
+ sdkSync.WithExternalResourceEntitlementIdFilter(m.externalResourceEntitlementIdFilter),
+ sdkSync.WithTargetedSyncResourceIDs(m.targetedSyncResourceIDs),
+ )
if err != nil {
return err
}
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go
index c4ca3f67..d90dd35b 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/tasks/tasks.go
@@ -66,6 +66,8 @@ func Is(task *v1.Task, target taskTypes.TaskType) bool {
_, ok = task.GetTaskType().(*v1.Task_ActionInvoke)
case taskTypes.ActionStatusType:
_, ok = task.GetTaskType().(*v1.Task_ActionStatus)
+ case taskTypes.CreateSyncDiff:
+ _, ok = task.GetTaskType().(*v1.Task_CreateSyncDiff)
default:
return false
}
@@ -117,6 +119,8 @@ func GetType(task *v1.Task) taskTypes.TaskType {
return taskTypes.ActionInvokeType
case *v1.Task_ActionStatus:
return taskTypes.ActionStatusType
+ case *v1.Task_CreateSyncDiff:
+ return taskTypes.CreateSyncDiff
default:
return taskTypes.UnknownType
}
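A tiny sketch showing the new task type round-tripping through GetType and Is, using the vendor import paths from the hunks above.

```go
package example

import (
	v1 "github.com/conductorone/baton-sdk/pb/c1/connectorapi/baton/v1"
	"github.com/conductorone/baton-sdk/pkg/tasks"
	taskTypes "github.com/conductorone/baton-sdk/pkg/types/tasks"
)

// createSyncDiffRoundTrip returns true: the new task type is recognized by
// both GetType and Is.
func createSyncDiffRoundTrip() bool {
	t := &v1.Task{TaskType: &v1.Task_CreateSyncDiff{}}
	return tasks.GetType(t) == taskTypes.CreateSyncDiff && tasks.Is(t, taskTypes.CreateSyncDiff)
}
```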
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go
index 69699b79..5be4e840 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/entitlement/entitlement.go
@@ -71,3 +71,28 @@ func NewAssignmentEntitlement(resource *v2.Resource, name string, entitlementOpt
}
return entitlement
}
+
+func NewEntitlement(resource *v2.Resource, name, purposeStr string, entitlementOptions ...EntitlementOption) *v2.Entitlement {
+ var purpose v2.Entitlement_PurposeValue
+ switch purposeStr {
+ case "permission":
+ purpose = v2.Entitlement_PURPOSE_VALUE_PERMISSION
+ case "assignment":
+ purpose = v2.Entitlement_PURPOSE_VALUE_ASSIGNMENT
+ default:
+ purpose = v2.Entitlement_PURPOSE_VALUE_UNSPECIFIED
+ }
+
+ entitlement := &v2.Entitlement{
+ Id: NewEntitlementID(resource, name),
+ DisplayName: name,
+ Slug: name,
+ Purpose: purpose,
+ Resource: resource,
+ }
+
+ for _, entitlementOption := range entitlementOptions {
+ entitlementOption(entitlement)
+ }
+ return entitlement
+}
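A minimal sketch of the new string-based constructor; unrecognized purpose strings fall back to PURPOSE_VALUE_UNSPECIFIED per the switch above. The entitlement name "connect" is a placeholder.

```go
package example

import (
	v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2"
	"github.com/conductorone/baton-sdk/pkg/types/entitlement"
)

// permissionEntitlement builds a permission-purpose entitlement from a
// purpose string instead of calling the purpose-specific constructors.
func permissionEntitlement(resource *v2.Resource) *v2.Entitlement {
	return entitlement.NewEntitlement(resource, "connect", "permission")
}
```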
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go
index 71dd5dee..73071aa4 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/tasks/tasks.go
@@ -36,6 +36,8 @@ func (tt TaskType) String() string {
return "list_resource_types"
case ListResourcesType:
return "list_resources"
+ case GetResourceType:
+ return "get_resource"
case ListEntitlementsType:
return "list_entitlements"
case ListGrantsType:
@@ -81,10 +83,12 @@ const (
GetTicketSchemaType
ListResourceTypesType
ListResourcesType
+ GetResourceType
ListEntitlementsType
ListGrantsType
GetMetadataType
ListEventsType
+ ListEventFeedsType
StartDebugging
BulkCreateTicketsType
BulkGetTicketsType
@@ -92,4 +96,5 @@ const (
ActionGetSchemaType
ActionInvokeType
ActionStatusType
+ CreateSyncDiff
)
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/types/types.go b/vendor/github.com/conductorone/baton-sdk/pkg/types/types.go
index b82cd451..191c0a9b 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/types/types.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/types/types.go
@@ -23,6 +23,7 @@ type ConnectorServer interface {
connectorV2.EventServiceServer
connectorV2.TicketsServiceServer
connectorV2.ActionServiceServer
+ connectorV2.ResourceGetterServiceServer
}
// ConnectorClient is an interface for a type that implements all ConnectorV2 services.
@@ -41,6 +42,7 @@ type ConnectorClient interface {
connectorV2.EventServiceClient
connectorV2.TicketsServiceClient
connectorV2.ActionServiceClient
+ connectorV2.ResourceGetterServiceClient
}
// ClientWrapper is an interface that returns a connector client.
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/pagination.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/pagination.go
new file mode 100644
index 00000000..397eb4b9
--- /dev/null
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/pagination.go
@@ -0,0 +1,99 @@
+package uhttp
+
+import (
+ "strings"
+
+ "github.com/conductorone/baton-sdk/pkg/pagination"
+)
+
+/*
+ * Uhttp pagination handling.
+ * There are three common types of pagination:
+ * 1. NextLink: http header containing a url to fetch the next page
+ * 2. Cursor: http body containing a token to fetch the next page
+ * 3. Offset: offset + limit to fetch the next page
+ * - Subset of offset: incremental page numbers
+ *
+ * All of these helper functions take a bag and push the next page state on (if there is a next page).
+ */
+
+type NextLinkConfig struct {
+ Header string `json:"header,omitempty"` // HTTP header containing the next link. Defaults to "link".
+ Rel string `json:"rel,omitempty"` // The rel value to look for in the link header. Defaults to "next".
+ ResourceTypeID string `json:"resource_type_id,omitempty"`
+ ResourceID string `json:"resource_id,omitempty"`
+}
+
+// Parses the link header and returns a map of rel values to URLs.
+func parseLinkHeader(header string) (map[string]string, error) {
+ if header == "" {
+ // Empty header is fine, it just means there are no more pages.
+ return nil, nil
+ }
+
+ links := make(map[string]string)
+ headerLinks := strings.Split(header, ",")
+ for _, headerLink := range headerLinks {
+ linkParts := strings.Split(headerLink, ";")
+ if len(linkParts) < 2 {
+ continue
+ }
+ linkUrl := strings.TrimSpace(linkParts[0])
+ linkUrl = strings.Trim(linkUrl, "<>")
+ var relValue string
+ for _, rel := range linkParts[1:] {
+ rel = strings.TrimSpace(rel)
+ relParts := strings.Split(rel, "=")
+ if len(relParts) < 2 {
+ continue
+ }
+ if relParts[0] == "rel" {
+ relValue = strings.Trim(relParts[1], "\"")
+ break
+ }
+ }
+ if relValue == "" {
+ continue
+ }
+ links[relValue] = linkUrl
+ }
+
+ return links, nil
+}
+
+// WithNextLinkPagination handles nextlink pagination.
+// The config is optional, and if not provided, the default config will be used.
+func WithNextLinkPagination(bag *pagination.Bag, config *NextLinkConfig) DoOption {
+ return func(resp *WrapperResponse) error {
+ if config == nil {
+ config = &NextLinkConfig{
+ Header: "link",
+ Rel: "next",
+ }
+ }
+ if config.Header == "" {
+ config.Header = "link"
+ }
+ if config.Rel == "" {
+ config.Rel = "next"
+ }
+ nextLinkVal := resp.Header.Get(config.Header)
+ if nextLinkVal == "" {
+ return nil
+ }
+ links, err := parseLinkHeader(nextLinkVal)
+ if err != nil {
+ return err
+ }
+ nextLink := links[config.Rel]
+ if nextLink == "" {
+ return nil
+ }
+ bag.Push(pagination.PageState{
+ Token: nextLink,
+ ResourceTypeID: config.ResourceTypeID,
+ ResourceID: config.ResourceID,
+ })
+ return nil
+ }
+}
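A hedged sketch of using the nextlink option: the DoOption is passed to a request so that a "next" Link header, if present, is pushed onto the pagination bag. The BaseHttpClient type and its Do signature are assumptions; only WithNextLinkPagination and its default config come from the file above.

```go
package example

import (
	"context"
	"net/http"

	"github.com/conductorone/baton-sdk/pkg/pagination"
	"github.com/conductorone/baton-sdk/pkg/uhttp"
)

// fetchPage performs one paginated request; afterwards the bag holds the
// next-page state (if any) for the connector to serialize as its page token.
func fetchPage(ctx context.Context, client *uhttp.BaseHttpClient, req *http.Request, bag *pagination.Bag) error {
	resp, err := client.Do(req, uhttp.WithNextLinkPagination(bag, nil))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}
```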
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go
index 4fd56e47..d4e42176 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/transport.go
@@ -15,6 +15,27 @@ import (
"golang.org/x/net/http2"
)
+var loggedResponseHeaders = []string{
+ // Limit headers
+ "X-Ratelimit-Limit",
+ "Ratelimit-Limit",
+ "X-RateLimit-Requests-Limit", // Linear uses a non-standard header
+ "X-Rate-Limit-Limit", // Okta uses a non-standard header
+
+ // Remaining headers
+ "X-Ratelimit-Remaining",
+ "Ratelimit-Remaining",
+ "X-RateLimit-Requests-Remaining", // Linear uses a non-standard header
+ "X-Rate-Limit-Remaining", // Okta uses a non-standard header
+
+ // Reset headers
+ "X-Ratelimit-Reset",
+ "Ratelimit-Reset",
+ "X-RateLimit-Requests-Reset", // Linear uses a non-standard header
+ "X-Rate-Limit-Reset", // Okta uses a non-standard header
+ "Retry-After", // Often returned with 429
+}
+
// NewTransport creates a new Transport, applies the options, and then cycles the transport.
func NewTransport(ctx context.Context, options ...Option) (*Transport, error) {
t := newTransport()
@@ -136,6 +157,15 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
if resp != nil {
fields = append(fields, zap.Int("http.status_code", resp.StatusCode))
+
+ headers := make(map[string][]string, len(resp.Header))
+ for _, header := range loggedResponseHeaders {
+ if v := resp.Header.Values(header); len(v) > 0 {
+ headers[header] = v
+ }
+ }
+
+ fields = append(fields, zap.Any("http.headers", headers))
}
t.l(ctx).Debug("Request complete", fields...)
diff --git a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go
index 57ca6b37..8c8034d2 100644
--- a/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go
+++ b/vendor/github.com/conductorone/baton-sdk/pkg/uhttp/wrapper.go
@@ -11,6 +11,7 @@ import (
"net/http"
"net/url"
"os"
+ "reflect"
"syscall"
"time"
@@ -249,6 +250,50 @@ func WithResponse(response interface{}) DoOption {
}
}
+// Handle anything that can be marshaled into JSON or XML.
+// If the response is a list, its values will be put into the "items" field.
+func WithGenericResponse(response *map[string]any) DoOption {
+ return func(resp *WrapperResponse) error {
+ if response == nil {
+ return status.Error(codes.InvalidArgument, "response is nil")
+ }
+ var v any
+ var err error
+
+ if IsJSONContentType(resp.Header.Get(ContentType)) {
+ err = WithJSONResponse(&v)(resp)
+ if err != nil {
+ return err
+ }
+ if list, ok := v.([]any); ok {
+ (*response)["items"] = list
+ } else if vMap, ok := v.(map[string]any); ok {
+ *response = vMap
+ } else {
+ return status.Errorf(codes.Internal, "unsupported content type: %s", reflect.TypeOf(v))
+ }
+ return nil
+ }
+
+ if IsXMLContentType(resp.Header.Get(ContentType)) {
+ // WithXMLResponse decodes directly into *response; v is never populated
+ // on this path, so no further shape checks are performed here.
+ err = WithXMLResponse(response)(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return status.Error(codes.Unknown, "unsupported content type")
+ }
+}
+
func WrapErrors(preferredCode codes.Code, statusMsg string, errs ...error) error {
st := status.New(preferredCode, statusMsg)
@@ -402,6 +447,12 @@ func WithHeader(key, value string) RequestOption {
}
}
+func WithBody(body []byte) RequestOption {
+ return func() (io.ReadWriter, map[string]string, error) {
+ return bytes.NewBuffer(body), nil, nil
+ }
+}
+
func WithJSONBody(body interface{}) RequestOption {
return func() (io.ReadWriter, map[string]string, error) {
buffer := new(bytes.Buffer)
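A hedged sketch of the new WithGenericResponse option: it decodes an arbitrary JSON or XML body into a map, with top-level JSON arrays placed under the "items" key. The Do call and its signature are assumptions; the map is pre-allocated so the "items" branch has a map to write into.

```go
package example

import (
	"net/http"

	"github.com/conductorone/baton-sdk/pkg/uhttp"
)

// decodeAny decodes a response of unknown shape into a generic map.
func decodeAny(client *uhttp.BaseHttpClient, req *http.Request) (map[string]any, error) {
	body := map[string]any{}
	resp, err := client.Do(req, uhttp.WithGenericResponse(&body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return body, nil
}
```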
diff --git a/vendor/github.com/containerd/log/.golangci.yml b/vendor/github.com/containerd/log/.golangci.yml
new file mode 100644
index 00000000..a695775d
--- /dev/null
+++ b/vendor/github.com/containerd/log/.golangci.yml
@@ -0,0 +1,30 @@
+linters:
+ enable:
+ - exportloopref # Checks for pointers to enclosing loop variables
+ - gofmt
+ - goimports
+ - gosec
+ - ineffassign
+ - misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
+ - unconvert
+ - unused
+ - vet
+ - dupword # Checks for duplicate words in the source code
+ disable:
+ - errcheck
+
+run:
+ timeout: 5m
+ skip-dirs:
+ - api
+ - cluster
+ - design
+ - docs
+ - docs/man
+ - releases
+ - reports
+ - test # e2e scripts
diff --git a/vendor/github.com/containerd/log/LICENSE b/vendor/github.com/containerd/log/LICENSE
new file mode 100644
index 00000000..584149b6
--- /dev/null
+++ b/vendor/github.com/containerd/log/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/log/README.md b/vendor/github.com/containerd/log/README.md
new file mode 100644
index 00000000..00e08498
--- /dev/null
+++ b/vendor/github.com/containerd/log/README.md
@@ -0,0 +1,17 @@
+# log
+
+A Go package providing a common logging interface across containerd repositories and a way for clients to use and configure logging in containerd packages.
+
+This package is not intended to be used as a standalone logging package outside of the containerd ecosystem and is intended as an interface wrapper around a logging implementation.
+In the future this package may be replaced with a common go logging interface.
+
+## Project details
+
+**log** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
diff --git a/vendor/github.com/containerd/log/context.go b/vendor/github.com/containerd/log/context.go
new file mode 100644
index 00000000..20153066
--- /dev/null
+++ b/vendor/github.com/containerd/log/context.go
@@ -0,0 +1,182 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package log provides types and functions related to logging, passing
+// loggers through a context, and attaching context to the logger.
+//
+// # Transitional types
+//
+// This package contains various types that are aliases for types in [logrus].
+// These aliases are intended for transitioning away from hard-coding logrus
+// as logging implementation. Consumers of this package are encouraged to use
+// the type-aliases from this package instead of directly using their logrus
+// equivalent.
+//
+// The intent is to replace these aliases with locally defined types and
+// interfaces once all consumers are no longer directly importing logrus
+// types.
+//
+// IMPORTANT: due to the transitional purpose of this package, it is not
+// guaranteed for the full logrus API to be provided in the future. As
+// outlined, these aliases are provided as a step to transition away from
+// a specific implementation which, as a result, exposes the full logrus API.
+// While no decisions have been made on the ultimate design and interface
+// provided by this package, we do not expect carrying "less common" features.
+package log
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+)
+
+// G is a shorthand for [GetLogger].
+//
+// We may want to define this locally to a package to get package tagged log
+// messages.
+var G = GetLogger
+
+// L is an alias for the standard logger.
+var L = &Entry{
+ Logger: logrus.StandardLogger(),
+ // Default is three fields plus a little extra room.
+ Data: make(Fields, 6),
+}
+
+type loggerKey struct{}
+
+// Fields type to pass to "WithFields".
+type Fields = map[string]any
+
+// Entry is a logging entry. It contains all the fields passed with
+// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn,
+// Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+//
+// Entry is a transitional type, and currently an alias for [logrus.Entry].
+type Entry = logrus.Entry
+
+// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
+// zeros to ensure the formatted time is always the same number of
+// characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// Level is a logging level.
+type Level = logrus.Level
+
+// Supported log levels.
+const (
+ // TraceLevel level. Designates finer-grained informational events
+ // than [DebugLevel].
+ TraceLevel Level = logrus.TraceLevel
+
+ // DebugLevel level. Usually only enabled when debugging. Very verbose
+ // logging.
+ DebugLevel Level = logrus.DebugLevel
+
+ // InfoLevel level. General operational entries about what's going on
+ // inside the application.
+ InfoLevel Level = logrus.InfoLevel
+
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel Level = logrus.WarnLevel
+
+ // ErrorLevel level. Logs errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel Level = logrus.ErrorLevel
+
+ // FatalLevel level. Logs and then calls "logger.Exit(1)". It exits
+ // even if the logging level is set to Panic.
+ FatalLevel Level = logrus.FatalLevel
+
+ // PanicLevel level. This is the highest level of severity. Logs and
+ // then calls panic with the message passed to Debug, Info, ...
+ PanicLevel Level = logrus.PanicLevel
+)
+
+// SetLevel sets log level globally. It returns an error if the given
+// level is not supported.
+//
+// level can be one of:
+//
+// - "trace" ([TraceLevel])
+// - "debug" ([DebugLevel])
+// - "info" ([InfoLevel])
+// - "warn" ([WarnLevel])
+// - "error" ([ErrorLevel])
+// - "fatal" ([FatalLevel])
+// - "panic" ([PanicLevel])
+func SetLevel(level string) error {
+ lvl, err := logrus.ParseLevel(level)
+ if err != nil {
+ return err
+ }
+
+ L.Logger.SetLevel(lvl)
+ return nil
+}
+
+// GetLevel returns the current log level.
+func GetLevel() Level {
+ return L.Logger.GetLevel()
+}
+
+// OutputFormat specifies a log output format.
+type OutputFormat string
+
+// Supported log output formats.
+const (
+ // TextFormat represents the text logging format.
+ TextFormat OutputFormat = "text"
+
+ // JSONFormat represents the JSON logging format.
+ JSONFormat OutputFormat = "json"
+)
+
+// SetFormat sets the log output format ([TextFormat] or [JSONFormat]).
+func SetFormat(format OutputFormat) error {
+ switch format {
+ case TextFormat:
+ L.Logger.SetFormatter(&logrus.TextFormatter{
+ TimestampFormat: RFC3339NanoFixed,
+ FullTimestamp: true,
+ })
+ return nil
+ case JSONFormat:
+ L.Logger.SetFormatter(&logrus.JSONFormatter{
+ TimestampFormat: RFC3339NanoFixed,
+ })
+ return nil
+ default:
+ return fmt.Errorf("unknown log format: %s", format)
+ }
+}
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *Entry) context.Context {
+ return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx))
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *Entry {
+ if logger := ctx.Value(loggerKey{}); logger != nil {
+ return logger.(*Entry)
+ }
+ return L.WithContext(ctx)
+}
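A short usage sketch of the context helpers documented above: attach a field-scoped logger to the context with WithLogger, then retrieve it anywhere downstream via the G shorthand.

```go
package example

import (
	"context"

	"github.com/containerd/log"
)

// doWork attaches a component-tagged logger to the context and uses it later.
func doWork(ctx context.Context) {
	ctx = log.WithLogger(ctx, log.L.WithField("component", "example"))
	log.G(ctx).Info("starting work")
}
```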
diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes
new file mode 100644
index 00000000..a0717e4b
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
\ No newline at end of file
diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml
new file mode 100644
index 00000000..a695775d
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/.golangci.yml
@@ -0,0 +1,30 @@
+linters:
+ enable:
+ - exportloopref # Checks for pointers to enclosing loop variables
+ - gofmt
+ - goimports
+ - gosec
+ - ineffassign
+ - misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
+ - unconvert
+ - unused
+ - vet
+ - dupword # Checks for duplicate words in the source code
+ disable:
+ - errcheck
+
+run:
+ timeout: 5m
+ skip-dirs:
+ - api
+ - cluster
+ - design
+ - docs
+ - docs/man
+ - releases
+ - reports
+ - test # e2e scripts
diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE
new file mode 100644
index 00000000..584149b6
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright The containerd Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md
new file mode 100644
index 00000000..2059de77
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/README.md
@@ -0,0 +1,32 @@
+# platforms
+
+A Go package for formatting, normalizing and matching container platforms.
+
+This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52).
+
+## Platform Specifier
+
+While the OCI platform specifications provide a tool for components to
+specify structured information, user input typically doesn't need the full
+context and much can be inferred. To solve this problem, this package introduces
+"specifiers". A specifier has the format
+`<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+operating system or the architecture or both.
+
+An example of a common specifier is `linux/amd64`. If the host has a default
+runtime that matches this, the user can simply provide the component that
+matters. For example, if an image provides `amd64` and `arm64` support, the
+operating system, `linux` can be inferred, so they only have to provide
+`arm64` or `amd64`. Similar behavior is implemented for operating systems,
+where the architecture may be known but a runtime may support images from
+different operating systems.
+
+## Project details
+
+**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
\ No newline at end of file
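A small sketch of working with specifiers as described in the README above. Parse and Format are part of this package's public API, although they do not appear in the hunks shown here.

```go
package example

import (
	"fmt"

	"github.com/containerd/platforms"
)

// parseSpecifier parses a specifier string and prints its canonical form.
func parseSpecifier() error {
	p, err := platforms.Parse("linux/arm64")
	if err != nil {
		return err
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm64"
	return nil
}
```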
diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go
new file mode 100644
index 00000000..3913ef66
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/compare.go
@@ -0,0 +1,203 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MatchComparer is able to match and compare platforms to
+// filter and sort platforms.
+type MatchComparer interface {
+ Matcher
+
+ Less(specs.Platform, specs.Platform) bool
+}
+
+// platformVector returns an (ordered) vector of appropriate specs.Platform
+// objects to try matching for the given platform object (see platforms.Only).
+func platformVector(platform specs.Platform) []specs.Platform {
+ vector := []specs.Platform{platform}
+
+ switch platform.Architecture {
+ case "amd64":
+ if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 {
+ for amd64Version--; amd64Version >= 1; amd64Version-- {
+ vector = append(vector, specs.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: "v" + strconv.Itoa(amd64Version),
+ })
+ }
+ }
+ vector = append(vector, specs.Platform{
+ Architecture: "386",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ })
+ case "arm":
+ if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
+ for armVersion--; armVersion >= 5; armVersion-- {
+ vector = append(vector, specs.Platform{
+ Architecture: platform.Architecture,
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: "v" + strconv.Itoa(armVersion),
+ })
+ }
+ }
+ case "arm64":
+ variant := platform.Variant
+ if variant == "" {
+ variant = "v8"
+ }
+ vector = append(vector, platformVector(specs.Platform{
+ Architecture: "arm",
+ OS: platform.OS,
+ OSVersion: platform.OSVersion,
+ OSFeatures: platform.OSFeatures,
+ Variant: variant,
+ })...)
+ }
+
+ return vector
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+//
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+ return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm/64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+ return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+ matchers := make([]Matcher, len(platforms))
+ for i := range platforms {
+ matchers[i] = NewMatcher(platforms[i])
+ }
+ return orderedPlatformComparer{
+ matchers: matchers,
+ }
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+ matchers := make([]Matcher, len(platforms))
+ for i := range platforms {
+ matchers[i] = NewMatcher(platforms[i])
+ }
+ return anyPlatformComparer{
+ matchers: matchers,
+ }
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+ matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+ for _, m := range c.matchers {
+ if m.Match(platform) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+ for _, m := range c.matchers {
+ p1m := m.Match(p1)
+ p2m := m.Match(p2)
+ if p1m && !p2m {
+ return true
+ }
+ if p1m || p2m {
+ return false
+ }
+ }
+ return false
+}
+
+type anyPlatformComparer struct {
+ matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+ for _, m := range c.matchers {
+ if m.Match(platform) {
+ return true
+ }
+ }
+ return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+ var p1m, p2m bool
+ for _, m := range c.matchers {
+ if !p1m && m.Match(p1) {
+ p1m = true
+ }
+ if !p2m && m.Match(p2) {
+ p2m = true
+ }
+ if p1m && p2m {
+ return false
+ }
+ }
+ // If one matches and the other does not, sort the match first
+ return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+ return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+ return false
+}
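For context on how these comparers are typically used, here is a minimal, self-contained sketch (the candidate list is made up for illustration): `Only` admits the requested platform plus its documented fallbacks, and `Less` ranks the preferred platform first.

```go
package main

import (
	"fmt"
	"sort"

	"github.com/containerd/platforms"
)

func main() {
	// Candidates as they might appear in an image index (illustrative values only).
	candidates := []platforms.Platform{
		platforms.MustParse("linux/386"),
		platforms.MustParse("linux/amd64"),
		platforms.MustParse("windows/amd64"),
	}

	// Only(linux/amd64) also admits linux/386 via the platformVector fallback above.
	m := platforms.Only(platforms.MustParse("linux/amd64"))

	var matched []platforms.Platform
	for _, p := range candidates {
		if m.Match(p) {
			matched = append(matched, p)
		}
	}

	// Less ranks the preferred platform first: linux/amd64 before linux/386.
	sort.SliceStable(matched, func(i, j int) bool {
		return m.Less(matched[i], matched[j])
	})

	for _, p := range matched {
		fmt.Println(platforms.Format(p)) // linux/amd64, then linux/386
	}
}
```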
diff --git a/vendor/github.com/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go
new file mode 100644
index 00000000..91f50e8c
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+ "sync"
+
+ "github.com/containerd/log"
+)
+
+// cpuVariantValue holds the ARM instruction set architecture variant, e.g. v7 or v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+ cpuVariantOnce.Do(func() {
+ if isArmArch(runtime.GOARCH) {
+ var err error
+ cpuVariantValue, err = getCPUVariant()
+ if err != nil {
+ log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err)
+ }
+ }
+ })
+ return cpuVariantValue
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
new file mode 100644
index 00000000..98c7001f
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
@@ -0,0 +1,160 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+
+ "golang.org/x/sys/unix"
+)
+
+// getMachineArch retrieves the machine architecture through system call
+func getMachineArch() (string, error) {
+ var uname unix.Utsname
+ err := unix.Uname(&uname)
+ if err != nil {
+ return "", err
+ }
+
+ arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)])
+
+ return arch, nil
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// ourselves. We can simply parse this information from /proc/cpuinfo.
+func getCPUInfo(pattern string) (info string, err error) {
+
+ cpuinfo, err := os.Open("/proc/cpuinfo")
+ if err != nil {
+ return "", err
+ }
+ defer cpuinfo.Close()
+
+ // Parse the cpuinfo line by line. For SMP SoCs, parsing the
+ // first core is enough.
+ scanner := bufio.NewScanner(cpuinfo)
+ for scanner.Scan() {
+ newline := scanner.Text()
+ list := strings.Split(newline, ":")
+
+ if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+ return strings.TrimSpace(list[1]), nil
+ }
+ }
+
+ // Check whether the scanner encountered errors
+ err = scanner.Err()
+ if err != nil {
+ return "", err
+ }
+
+ return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound)
+}
+
+// getCPUVariantFromArch derives the CPU variant from the arch string reported by uname.
+func getCPUVariantFromArch(arch string) (string, error) {
+
+ var variant string
+
+ arch = strings.ToLower(arch)
+
+ if arch == "aarch64" {
+ variant = "8"
+ } else if len(arch) >= 5 && arch[0:4] == "armv" {
+ // Valid arch format is in form of armvXx
+ switch arch[3:5] {
+ case "v8":
+ variant = "8"
+ case "v7":
+ variant = "7"
+ case "v6":
+ variant = "6"
+ case "v5":
+ variant = "5"
+ case "v4":
+ variant = "4"
+ case "v3":
+ variant = "3"
+ default:
+ variant = "unknown"
+ }
+ } else {
+ return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument)
+ }
+ return variant, nil
+}
+
+// getCPUVariant returns cpu variant for ARM
+// We first try reading the "Cpu architecture" field from /proc/cpuinfo.
+// If we can't find it, we fall back to a system call (uname).
+// This covers running ARM in an emulated environment on an x86 host, where
+// this field is not present in /proc/cpuinfo.
+func getCPUVariant() (string, error) {
+ variant, err := getCPUInfo("Cpu architecture")
+ if err != nil {
+ if errors.Is(err, errNotFound) {
+ // Let's try getting CPU variant from machine architecture
+ arch, err := getMachineArch()
+ if err != nil {
+ return "", fmt.Errorf("failure getting machine architecture: %v", err)
+ }
+
+ variant, err = getCPUVariantFromArch(arch)
+ if err != nil {
+ return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err)
+ }
+ } else {
+ return "", fmt.Errorf("failure getting CPU variant: %v", err)
+ }
+ }
+
+ // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+ // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+ if runtime.GOARCH == "arm" && variant == "7" {
+ model, err := getCPUInfo("model name")
+ if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+ variant = "6"
+ }
+ }
+
+ switch strings.ToLower(variant) {
+ case "8", "aarch64":
+ variant = "v8"
+ case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+ variant = "v7"
+ case "6", "6tej":
+ variant = "v6"
+ case "5", "5t", "5te", "5tej":
+ variant = "v5"
+ case "4", "4t":
+ variant = "v4"
+ case "3":
+ variant = "v3"
+ default:
+ variant = "unknown"
+ }
+
+ return variant, nil
+}
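As a rough illustration of the /proc/cpuinfo approach described above, the following stand-alone sketch applies the same split-on-colon, compare-the-key idea to a hard-coded sample; the sample text and the `findCPUInfoField` helper are hypothetical and not part of this package.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// Illustrative /proc/cpuinfo fragment; real content varies by kernel and SoC.
const sample = `processor       : 0
model name      : ARMv7 Processor rev 4 (v7l)
Cpu architecture: 7
`

// findCPUInfoField mirrors the "split on ':' and compare the key" parsing used above.
func findCPUInfoField(r *strings.Reader, key string) (string, bool) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		parts := strings.SplitN(scanner.Text(), ":", 2)
		if len(parts) == 2 && strings.EqualFold(strings.TrimSpace(parts[0]), key) {
			return strings.TrimSpace(parts[1]), true
		}
	}
	return "", false
}

func main() {
	if v, ok := findCPUInfoField(strings.NewReader(sample), "Cpu architecture"); ok {
		// getCPUVariant would map "7" to "v7" (with the Raspberry Pi ARMv6 exception noted above).
		fmt.Println("Cpu architecture:", v)
	}
}
```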
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go
new file mode 100644
index 00000000..97a1fe8a
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_other.go
@@ -0,0 +1,55 @@
+//go:build !linux
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func getCPUVariant() (string, error) {
+
+ var variant string
+
+ if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
+ // Windows and Darwin only support v7 for ARM32 and v8 for ARM64, so we can use
+ // runtime.GOARCH to determine the variants
+ switch runtime.GOARCH {
+ case "arm64":
+ variant = "v8"
+ case "arm":
+ variant = "v7"
+ default:
+ variant = "unknown"
+ }
+ } else if runtime.GOOS == "freebsd" {
+ // FreeBSD supports ARMv6 and ARMv7, as well as ARMv4 and ARMv5 (though deprecated);
+ // detecting those variants is currently unimplemented.
+ switch runtime.GOARCH {
+ case "arm64":
+ variant = "v8"
+ default:
+ variant = "unknown"
+ }
+ } else {
+ return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented)
+ }
+
+ return variant, nil
+}
diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go
new file mode 100644
index 00000000..2e26fd3b
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/database.go
@@ -0,0 +1,109 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+ "strings"
+)
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+ switch os {
+ case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+ return true
+ }
+ return false
+}
+
+// isArmArch returns true if the architecture is ARM.
+//
+// The arch value should be normalized before being passed to this function.
+func isArmArch(arch string) bool {
+ switch arch {
+ case "arm", "arm64":
+ return true
+ }
+ return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+ switch arch {
+ case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
+ return true
+ }
+ return false
+}
+
+func normalizeOS(os string) string {
+ if os == "" {
+ return runtime.GOOS
+ }
+ os = strings.ToLower(os)
+
+ switch os {
+ case "macos":
+ os = "darwin"
+ }
+ return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+ arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+ switch arch {
+ case "i386":
+ arch = "386"
+ variant = ""
+ case "x86_64", "x86-64", "amd64":
+ arch = "amd64"
+ if variant == "v1" {
+ variant = ""
+ }
+ case "aarch64", "arm64":
+ arch = "arm64"
+ switch variant {
+ case "8", "v8":
+ variant = ""
+ }
+ case "armhf":
+ arch = "arm"
+ variant = "v7"
+ case "armel":
+ arch = "arm"
+ variant = "v6"
+ case "arm":
+ switch variant {
+ case "", "7":
+ variant = "v7"
+ case "5", "6", "8":
+ variant = "v" + variant
+ }
+ }
+
+ return arch, variant
+}
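A small usage sketch of the normalization rules above, going through the exported `Normalize` and `Format` functions; the inputs are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// Aliases such as macOS, x86_64, aarch64 and armhf collapse to canonical values.
	for _, p := range []platforms.Platform{
		{OS: "macOS", Architecture: "x86_64"},
		{OS: "linux", Architecture: "aarch64", Variant: "v8"},
		{OS: "linux", Architecture: "armhf"},
	} {
		n := platforms.Normalize(p)
		// Prints darwin/amd64, linux/arm64 (the v8 variant is dropped), linux/arm/v7.
		fmt.Printf("%s/%s/%s -> %s\n", p.OS, p.Architecture, p.Variant, platforms.Format(n))
	}
}
```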
diff --git a/vendor/github.com/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go
new file mode 100644
index 00000000..9d898d60
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults.go
@@ -0,0 +1,29 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+// DefaultString returns the default string specifier for the platform;
+// since [PR#6](https://github.com/containerd/platforms/pull/6) the result
+// may also include the OSVersion of the default platform specification.
+func DefaultString() string {
+ return FormatAll(DefaultSpec())
+}
+
+// DefaultStrict returns strict form of Default.
+func DefaultStrict() MatchComparer {
+ return OnlyStrict(DefaultSpec())
+}
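A brief usage sketch of these defaults; the printed values depend on the host, and the Windows-style string in the comment is only indicative.

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// DefaultString reports the host platform, e.g. "linux/amd64", or a form
	// that includes the OSVersion such as "windows(10.0.20348)/amd64".
	fmt.Println("host platform:", platforms.DefaultString())

	// Default() applies the host's resolution rules (e.g. ARM fallbacks),
	// while DefaultStrict() requires an exact normalized match.
	spec := platforms.DefaultSpec()
	fmt.Println("loose match:", platforms.Default().Match(spec))
	fmt.Println("strict match:", platforms.DefaultStrict().Match(spec))
}
```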
diff --git a/vendor/github.com/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go
new file mode 100644
index 00000000..72355ca8
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_darwin.go
@@ -0,0 +1,44 @@
+//go:build darwin
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Ordered(DefaultSpec(), specs.Platform{
+ // darwin runtime also supports Linux binary via runu/LKL
+ OS: "linux",
+ Architecture: runtime.GOARCH,
+ })
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go
new file mode 100644
index 00000000..d3fe89e0
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_freebsd.go
@@ -0,0 +1,43 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Ordered(DefaultSpec(), specs.Platform{
+ OS: "linux",
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ })
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go
new file mode 100644
index 00000000..44acc47e
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_unix.go
@@ -0,0 +1,40 @@
+//go:build !windows && !darwin && !freebsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "runtime"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go
new file mode 100644
index 00000000..427ed72e
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/defaults_windows.go
@@ -0,0 +1,118 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ "fmt"
+ "runtime"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+ "golang.org/x/sys/windows"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+ major, minor, build := windows.RtlGetNtVersionNumbers()
+ return specs.Platform{
+ OS: runtime.GOOS,
+ Architecture: runtime.GOARCH,
+ OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build),
+ // The Variant field will be empty if arch != ARM.
+ Variant: cpuVariant(),
+ }
+}
+
+type windowsmatcher struct {
+ specs.Platform
+ osVersionPrefix string
+ defaultMatcher Matcher
+}
+
+// Match matches platform with the same windows major, minor
+// and build version.
+func (m windowsmatcher) Match(p specs.Platform) bool {
+ match := m.defaultMatcher.Match(p)
+
+ if match && m.OS == "windows" {
+ // HPC containers do not have OS version filled
+ if m.OSVersion == "" || p.OSVersion == "" {
+ return true
+ }
+
+ hostOsVersion := getOSVersion(m.osVersionPrefix)
+ ctrOsVersion := getOSVersion(p.OSVersion)
+ return checkHostAndContainerCompat(hostOsVersion, ctrOsVersion)
+ }
+
+ return match
+}
+
+func getOSVersion(osVersionPrefix string) osVersion {
+ parts := strings.Split(osVersionPrefix, ".")
+ if len(parts) < 3 {
+ return osVersion{}
+ }
+
+ majorVersion, _ := strconv.Atoi(parts[0])
+ minorVersion, _ := strconv.Atoi(parts[1])
+ buildNumber, _ := strconv.Atoi(parts[2])
+
+ return osVersion{
+ MajorVersion: uint8(majorVersion),
+ MinorVersion: uint8(minorVersion),
+ Build: uint16(buildNumber),
+ }
+}
+
+// Less sorts matched platforms in front of other platforms.
+// For matched platforms, it puts platforms with larger revision
+// number in front.
+func (m windowsmatcher) Less(p1, p2 specs.Platform) bool {
+ m1, m2 := m.Match(p1), m.Match(p2)
+ if m1 && m2 {
+ r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
+ return r1 > r2
+ }
+ return m1 && !m2
+}
+
+func revision(v string) int {
+ parts := strings.Split(v, ".")
+ if len(parts) < 4 {
+ return 0
+ }
+ r, err := strconv.Atoi(parts[3])
+ if err != nil {
+ return 0
+ }
+ return r
+}
+
+func prefix(v string) string {
+ parts := strings.Split(v, ".")
+ if len(parts) < 4 {
+ return v
+ }
+ return strings.Join(parts[0:3], ".")
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+ return Only(DefaultSpec())
+}
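To make the prefix/revision split used by this matcher concrete, here is a stand-alone sketch; `splitOSVersion` is a hypothetical local helper that mirrors the unexported `prefix` and `revision` functions above, not an API of this package.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitOSVersion mirrors the Windows matcher's handling of OSVersion strings:
// "10.0.17763.3287" -> prefix "10.0.17763", revision 3287.
func splitOSVersion(v string) (prefix string, revision int) {
	parts := strings.Split(v, ".")
	if len(parts) < 4 {
		return v, 0
	}
	rev, err := strconv.Atoi(parts[3])
	if err != nil {
		rev = 0
	}
	return strings.Join(parts[:3], "."), rev
}

func main() {
	for _, v := range []string{"10.0.17763.3287", "10.0.20348"} {
		p, r := splitOSVersion(v)
		fmt.Printf("%s -> prefix=%s revision=%d\n", v, p, r)
	}
}
```

Matching compares the major.minor.build prefix (together with the LTSC compatibility rule from platform_compat_windows.go), and Less prefers the higher revision among matches.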
diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go
new file mode 100644
index 00000000..5ad721e7
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/errors.go
@@ -0,0 +1,30 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import "errors"
+
+// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs],
+// however, they are not exported as they are not expected to be used as sentinel
+// errors by consumers of this package.
+//
+//nolint:unused // not all errors are used on all platforms.
+var (
+ errNotFound = errors.New("not found")
+ errInvalidArgument = errors.New("invalid argument")
+ errNotImplemented = errors.New("not implemented")
+)
diff --git a/vendor/github.com/containerd/platforms/platform_compat_windows.go b/vendor/github.com/containerd/platforms/platform_compat_windows.go
new file mode 100644
index 00000000..89e66f0c
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platform_compat_windows.go
@@ -0,0 +1,78 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+// osVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type osVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// Windows Client and Server build numbers.
+//
+// See:
+// https://learn.microsoft.com/en-us/windows/release-health/release-information
+// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info
+// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information
+const (
+ // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server
+ // 2019 (ltsc2019), and Windows 10 (October 2018 Update).
+ rs5 = 17763
+
+ // v21H2Server corresponds to Windows Server 2022 (ltsc2022).
+ v21H2Server = 20348
+
+ // v22H2Win11 corresponds to Windows 11 (2022 Update).
+ v22H2Win11 = 22621
+)
+
+// List of stable ABI compliant ltsc releases
+// Note: List must be sorted in ascending order
+var compatLTSCReleases = []uint16{
+ v21H2Server,
+}
+
+// CheckHostAndContainerCompat checks if given host and container
+// OS versions are compatible.
+// It includes support for stable ABI compliant versions as well.
+// Every release after WS 2022 will support the previous ltsc
+// container image. Stable ABI is in preview mode for windows 11 client.
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility
+func checkHostAndContainerCompat(host, ctr osVersion) bool {
+ // check major minor versions of host and guest
+ if host.MajorVersion != ctr.MajorVersion ||
+ host.MinorVersion != ctr.MinorVersion {
+ return false
+ }
+
+ // If host is < WS 2022, exact version match is required
+ if host.Build < v21H2Server {
+ return host.Build == ctr.Build
+ }
+
+ var supportedLtscRelease uint16
+ for i := len(compatLTSCReleases) - 1; i >= 0; i-- {
+ if host.Build >= compatLTSCReleases[i] {
+ supportedLtscRelease = compatLTSCReleases[i]
+ break
+ }
+ }
+ return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build
+}
diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go
new file mode 100644
index 00000000..1bbbdb91
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms.go
@@ -0,0 +1,308 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+// m, err := Parse("linux")
+// if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+// if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes, or used as a filter
+// for fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// # Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, these will be
+// images and runtimes that declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+// type Platform struct {
+// Architecture string
+// OS string
+// Variant string
+// }
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set Variant in certain
+// cases, which are outlined below.
+//
+// # Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `||/[/]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux` can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// # Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+// Value Normalized
+// aarch64 arm64
+// armhf arm
+// armel arm/v6
+// i386 386
+// x86_64 amd64
+// x86-64 amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// # ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// Similarly, the most common arm64 version v8, and most common amd64 version v1
+// are represented without the variant.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+ "fmt"
+ "path"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+ specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+ osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`)
+)
+
+const osAndVersionFormat = "%s(%s)"
+
+// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere.
+type Platform = specs.Platform
+
+// Matcher matches platforms specifications, provided by an image or runtime.
+type Matcher interface {
+ Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+ return newDefaultMatcher(platform)
+}
+
+type matcher struct {
+ specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+ normalized := Normalize(platform)
+ return m.OS == normalized.OS &&
+ m.Architecture == normalized.Architecture &&
+ m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+ return FormatAll(m.Platform)
+}
+
+// ParseAll parses a list of platform specifiers into a list of platform.
+func ParseAll(specifiers []string) ([]specs.Platform, error) {
+ platforms := make([]specs.Platform, len(specifiers))
+ for i, s := range specifiers {
+ p, err := Parse(s)
+ if err != nil {
+ return nil, fmt.Errorf("invalid platform %s: %w", s, err)
+ }
+ platforms[i] = p
+ }
+ return platforms, nil
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>[(<OSVersion>)]|<arch>|<os>[(<OSVersion>)]/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. The OSVersion can be part of the OS, like `windows(10.0.17763)`.
+// When an OSVersion is specified, specs.Platform.OSVersion is populated with that value,
+// and an empty string otherwise.
+// If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+ if strings.Contains(specifier, "*") {
+ // TODO(stevvooe): need to work out exact wildcard handling
+ return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument)
+ }
+
+ // Limit to 4 elements to prevent unbounded split
+ parts := strings.SplitN(specifier, "/", 4)
+
+ var p specs.Platform
+ for i, part := range parts {
+ if i == 0 {
+ // First element is <os>[(<OSVersion>)]
+ osVer := osAndVersionRe.FindStringSubmatch(part)
+ if osVer == nil {
+ return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument)
+ }
+
+ p.OS = normalizeOS(osVer[1])
+ p.OSVersion = osVer[2]
+ } else {
+ if !specifierRe.MatchString(part) {
+ return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument)
+ }
+ }
+ }
+
+ switch len(parts) {
+ case 1:
+ // in this case, we will test that the value might be an OS (with or
+ // without the optional OSVersion specified) and look it up.
+ // If it is not known, we'll treat it as an architecture. Since
+ // we have very little information about the platform here, we are
+ // going to be a little more strict if we don't know about the argument
+ // value.
+ if isKnownOS(p.OS) {
+ // picks a default architecture
+ p.Architecture = runtime.GOARCH
+ if p.Architecture == "arm" && cpuVariant() != "v7" {
+ p.Variant = cpuVariant()
+ }
+
+ return p, nil
+ }
+
+ p.Architecture, p.Variant = normalizeArch(parts[0], "")
+ if p.Architecture == "arm" && p.Variant == "v7" {
+ p.Variant = ""
+ }
+ if isKnownArch(p.Architecture) {
+ p.OS = runtime.GOOS
+ return p, nil
+ }
+
+ return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument)
+ case 2:
+ // In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care
+ // about whether or not we know of the platform.
+ p.Architecture, p.Variant = normalizeArch(parts[1], "")
+ if p.Architecture == "arm" && p.Variant == "v7" {
+ p.Variant = ""
+ }
+
+ return p, nil
+ case 3:
+ // we have a fully specified variant, this is rare
+ p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+ if p.Architecture == "arm64" && p.Variant == "" {
+ p.Variant = "v8"
+ }
+
+ return p, nil
+ }
+
+ return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+ p, err := Parse(specifier)
+ if err != nil {
+ panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+ }
+ return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+ if platform.OS == "" {
+ return "unknown"
+ }
+
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// FormatAll returns a string specifier that also includes the OSVersion from the
+// provided platform specification.
+func FormatAll(platform specs.Platform) string {
+ if platform.OS == "" {
+ return "unknown"
+ }
+
+ if platform.OSVersion != "" {
+ OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion)
+ return path.Join(OSAndVersion, platform.Architecture, platform.Variant)
+ }
+ return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+ platform.OS = normalizeOS(platform.OS)
+ platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+ return platform
+}
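A short usage sketch of the parsing and formatting entry points defined in this file; the `linux/arm64` output is host-dependent and shown only as an example.

```go
package main

import (
	"fmt"

	"github.com/containerd/platforms"
)

func main() {
	// A bare architecture (or OS) is completed from the local environment.
	p, err := platforms.Parse("arm64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // e.g. "linux/arm64" on a Linux host

	// An OSVersion can ride along in parentheses; Format drops it, FormatAll keeps it.
	w := platforms.MustParse("windows(10.0.17763)/amd64")
	fmt.Println(platforms.Format(w))    // "windows/amd64"
	fmt.Println(platforms.FormatAll(w)) // "windows(10.0.17763)/amd64"
}
```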
diff --git a/vendor/github.com/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/platforms/platforms_other.go
new file mode 100644
index 00000000..03f4dcd9
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms_other.go
@@ -0,0 +1,30 @@
+//go:build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// newDefaultMatcher returns the default Matcher for containerd.
+func newDefaultMatcher(platform specs.Platform) Matcher {
+ return &matcher{
+ Platform: Normalize(platform),
+ }
+}
diff --git a/vendor/github.com/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/platforms/platforms_windows.go
new file mode 100644
index 00000000..950e2a2d
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms_windows.go
@@ -0,0 +1,34 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package platforms
+
+import (
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// newDefaultMatcher returns a Windows matcher that will match on osVersionPrefix
+// if the platform is Windows; otherwise it uses the default matcher.
+func newDefaultMatcher(platform specs.Platform) Matcher {
+ prefix := prefix(platform.OSVersion)
+ return windowsmatcher{
+ Platform: platform,
+ osVersionPrefix: prefix,
+ defaultMatcher: &matcher{
+ Platform: Normalize(platform),
+ },
+ }
+}
diff --git a/vendor/github.com/cpuguy83/dockercfg/LICENSE b/vendor/github.com/cpuguy83/dockercfg/LICENSE
new file mode 100644
index 00000000..8ed68180
--- /dev/null
+++ b/vendor/github.com/cpuguy83/dockercfg/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/cpuguy83/dockercfg/README.md b/vendor/github.com/cpuguy83/dockercfg/README.md
new file mode 100644
index 00000000..880ab801
--- /dev/null
+++ b/vendor/github.com/cpuguy83/dockercfg/README.md
@@ -0,0 +1,8 @@
+### github.com/cpuguy83/dockercfg
+Go library to load docker CLI configs, auths, etc. with minimal deps.
+So far the only dependency is the standard library.
+
+### Usage
+See the [godoc](https://godoc.org/github.com/cpuguy83/dockercfg) for API details.
+
+I'm currently using this in [zapp](https://github.com/cpuguy83/zapp/blob/d25c43d4cd7ccf29fba184aafbc720a753e1a15d/main.go#L58-L83) to handle registry auth instead of always asking the user to enter it.
\ No newline at end of file
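A minimal usage sketch of the credential lookup this library provides, using only the exported functions in this vendored code; whether anything is found depends entirely on the local Docker configuration.

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/dockercfg"
)

func main() {
	// Docker Hub aliases are folded to the canonical key used in config.json.
	host := dockercfg.ResolveRegistryHost("docker.io") // "https://index.docker.io/v1/"

	username, secret, err := dockercfg.GetRegistryCredentials(host)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	switch {
	case username == "" && secret != "":
		fmt.Println("got an identity token for", host)
	case username != "":
		fmt.Println("got credentials for", username)
	default:
		fmt.Println("no credentials configured for", host)
	}
}
```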
diff --git a/vendor/github.com/cpuguy83/dockercfg/auth.go b/vendor/github.com/cpuguy83/dockercfg/auth.go
new file mode 100644
index 00000000..106ab847
--- /dev/null
+++ b/vendor/github.com/cpuguy83/dockercfg/auth.go
@@ -0,0 +1,215 @@
+package dockercfg
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os/exec"
+ "runtime"
+ "strings"
+)
+
+// This is used by the docker CLI in cases where an oauth identity token is used.
+// In that case the username is stored literally as `<token>`. When fetching the
+// credentials, we check for this value to detect that the secret is an identity token.
+const tokenUsername = "<token>"
+
+// GetRegistryCredentials gets registry credentials for the passed in registry host.
+//
+// This will use [LoadDefaultConfig] to read registry auth details from the config.
+// If the config doesn't exist, it will attempt to load registry credentials using the default credential helper for the platform.
+func GetRegistryCredentials(hostname string) (string, string, error) {
+ cfg, err := LoadDefaultConfig()
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ return "", "", fmt.Errorf("load default config: %w", err)
+ }
+
+ return GetCredentialsFromHelper("", hostname)
+ }
+
+ return cfg.GetRegistryCredentials(hostname)
+}
+
+// ResolveRegistryHost can be used to transform a docker registry host name into what is used for the docker config/cred helpers
+//
+// This is useful for using with containerd authorizers.
+// Naturally this only transforms docker hub URLs.
+func ResolveRegistryHost(host string) string {
+ switch host {
+ case "index.docker.io", "docker.io", "https://index.docker.io/v1/", "registry-1.docker.io":
+ return "https://index.docker.io/v1/"
+ }
+ return host
+}
+
+// GetRegistryCredentials gets credentials, if any, for the provided hostname.
+//
+// Hostnames should already be resolved using [ResolveRegistryHost].
+//
+// If the returned username string is empty, the password is an identity token.
+func (c *Config) GetRegistryCredentials(hostname string) (string, string, error) {
+ h, ok := c.CredentialHelpers[hostname]
+ if ok {
+ return GetCredentialsFromHelper(h, hostname)
+ }
+
+ if c.CredentialsStore != "" {
+ username, password, err := GetCredentialsFromHelper(c.CredentialsStore, hostname)
+ if err != nil {
+ return "", "", fmt.Errorf("get credentials from store: %w", err)
+ }
+
+ if username != "" || password != "" {
+ return username, password, nil
+ }
+ }
+
+ auth, ok := c.AuthConfigs[hostname]
+ if !ok {
+ return GetCredentialsFromHelper("", hostname)
+ }
+
+ if auth.IdentityToken != "" {
+ return "", auth.IdentityToken, nil
+ }
+
+ if auth.Username != "" && auth.Password != "" {
+ return auth.Username, auth.Password, nil
+ }
+
+ return DecodeBase64Auth(auth)
+}
+
+// DecodeBase64Auth decodes the legacy file-based auth storage from the docker CLI.
+// It takes the "Auth" filed from AuthConfig and decodes that into a username and password.
+//
+// If "Auth" is empty, an empty user/pass will be returned, but not an error.
+func DecodeBase64Auth(auth AuthConfig) (string, string, error) {
+ if auth.Auth == "" {
+ return "", "", nil
+ }
+
+ decLen := base64.StdEncoding.DecodedLen(len(auth.Auth))
+ decoded := make([]byte, decLen)
+ n, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth))
+ if err != nil {
+ return "", "", fmt.Errorf("decode auth: %w", err)
+ }
+
+ decoded = decoded[:n]
+
+ const sep = ":"
+ user, pass, found := strings.Cut(string(decoded), sep)
+ if !found {
+ return "", "", fmt.Errorf("invalid auth: missing %q separator", sep)
+ }
+
+ return user, pass, nil
+}
+
+// Errors from credential helpers.
+var (
+ ErrCredentialsNotFound = errors.New("credentials not found in native keychain")
+ ErrCredentialsMissingServerURL = errors.New("no credentials server URL")
+)
+
+//nolint:gochecknoglobals // These are used to mock exec in tests.
+var (
+ // execLookPath is a variable that can be used to mock exec.LookPath in tests.
+ execLookPath = exec.LookPath
+ // execCommand is a variable that can be used to mock exec.Command in tests.
+ execCommand = exec.Command
+)
+
+// GetCredentialsFromHelper attempts to lookup credentials from the passed in docker credential helper.
+//
+// The credential helper should just be the suffix name (no "docker-credential-").
+// If the passed in helper program is empty this will look up the default helper for the platform.
+//
+// If the credentials are not found, no error is returned, only empty credentials.
+//
+// Hostnames should already be resolved using [ResolveRegistryHost]
+//
+// If the username string is empty, the password string is an identity token.
+func GetCredentialsFromHelper(helper, hostname string) (string, string, error) {
+ if helper == "" {
+ helper, helperErr := getCredentialHelper()
+ if helperErr != nil {
+ return "", "", fmt.Errorf("get credential helper: %w", helperErr)
+ }
+
+ if helper == "" {
+ return "", "", nil
+ }
+ }
+
+ helper = "docker-credential-" + helper
+ p, err := execLookPath(helper)
+ if err != nil {
+ if !errors.Is(err, exec.ErrNotFound) {
+ return "", "", fmt.Errorf("look up %q: %w", helper, err)
+ }
+
+ return "", "", nil
+ }
+
+ var outBuf, errBuf bytes.Buffer
+ cmd := execCommand(p, "get")
+ cmd.Stdin = strings.NewReader(hostname)
+ cmd.Stdout = &outBuf
+ cmd.Stderr = &errBuf
+
+ if err = cmd.Run(); err != nil {
+ out := strings.TrimSpace(outBuf.String())
+ switch out {
+ case ErrCredentialsNotFound.Error():
+ return "", "", nil
+ case ErrCredentialsMissingServerURL.Error():
+ return "", "", ErrCredentialsMissingServerURL
+ default:
+ return "", "", fmt.Errorf("execute %q stdout: %q stderr: %q: %w",
+ helper, out, strings.TrimSpace(errBuf.String()), err,
+ )
+ }
+ }
+
+ var creds struct {
+ Username string `json:"Username"`
+ Secret string `json:"Secret"`
+ }
+
+ if err = json.Unmarshal(outBuf.Bytes(), &creds); err != nil {
+ return "", "", fmt.Errorf("unmarshal credentials from: %q: %w", helper, err)
+ }
+
+ // When tokenUsername is used, the output is an identity token and the username is garbage.
+ if creds.Username == tokenUsername {
+ creds.Username = ""
+ }
+
+ return creds.Username, creds.Secret, nil
+}
+
+// getCredentialHelper gets the default credential helper name for the current platform.
+func getCredentialHelper() (string, error) {
+ switch runtime.GOOS {
+ case "linux":
+ if _, err := exec.LookPath("pass"); err != nil {
+ if errors.Is(err, exec.ErrNotFound) {
+ return "secretservice", nil
+ }
+ return "", fmt.Errorf(`look up "pass": %w`, err)
+ }
+ return "pass", nil
+ case "darwin":
+ return "osxkeychain", nil
+ case "windows":
+ return "wincred", nil
+ default:
+ return "", nil
+ }
+}
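To illustrate the legacy base64 auth handling above, a tiny sketch using `DecodeBase64Auth` with made-up credentials:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/cpuguy83/dockercfg"
)

func main() {
	// The legacy "auth" field is simply base64("user:pass").
	auth := dockercfg.AuthConfig{
		Auth: base64.StdEncoding.EncodeToString([]byte("alice:s3cret")),
	}

	user, pass, err := dockercfg.DecodeBase64Auth(auth)
	if err != nil {
		panic(err)
	}
	fmt.Printf("user=%s pass has %d bytes\n", user, len(pass)) // user=alice pass has 6 bytes
}
```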
diff --git a/vendor/github.com/cpuguy83/dockercfg/config.go b/vendor/github.com/cpuguy83/dockercfg/config.go
new file mode 100644
index 00000000..5e539079
--- /dev/null
+++ b/vendor/github.com/cpuguy83/dockercfg/config.go
@@ -0,0 +1,65 @@
+package dockercfg
+
+// Config represents the on disk format of the docker CLI's config file.
+type Config struct {
+ AuthConfigs map[string]AuthConfig `json:"auths"`
+ HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
+ PsFormat string `json:"psFormat,omitempty"`
+ ImagesFormat string `json:"imagesFormat,omitempty"`
+ NetworksFormat string `json:"networksFormat,omitempty"`
+ PluginsFormat string `json:"pluginsFormat,omitempty"`
+ VolumesFormat string `json:"volumesFormat,omitempty"`
+ StatsFormat string `json:"statsFormat,omitempty"`
+ DetachKeys string `json:"detachKeys,omitempty"`
+ CredentialsStore string `json:"credsStore,omitempty"`
+ CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
+ Filename string `json:"-"` // Note: for internal use only.
+ ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
+ ServicesFormat string `json:"servicesFormat,omitempty"`
+ TasksFormat string `json:"tasksFormat,omitempty"`
+ SecretFormat string `json:"secretFormat,omitempty"`
+ ConfigFormat string `json:"configFormat,omitempty"`
+ NodesFormat string `json:"nodesFormat,omitempty"`
+ PruneFilters []string `json:"pruneFilters,omitempty"`
+ Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
+ Experimental string `json:"experimental,omitempty"`
+ StackOrchestrator string `json:"stackOrchestrator,omitempty"`
+ Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
+ CurrentContext string `json:"currentContext,omitempty"`
+ CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
+ Aliases map[string]string `json:"aliases,omitempty"`
+}
+
+// ProxyConfig contains proxy configuration settings.
+type ProxyConfig struct {
+ HTTPProxy string `json:"httpProxy,omitempty"`
+ HTTPSProxy string `json:"httpsProxy,omitempty"`
+ NoProxy string `json:"noProxy,omitempty"`
+ FTPProxy string `json:"ftpProxy,omitempty"`
+}
+
+// AuthConfig contains authorization information for connecting to a Registry.
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry.
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// KubernetesConfig contains Kubernetes orchestrator settings.
+type KubernetesConfig struct {
+ AllNamespaces string `json:"allNamespaces,omitempty"`
+}
diff --git a/vendor/github.com/cpuguy83/dockercfg/load.go b/vendor/github.com/cpuguy83/dockercfg/load.go
new file mode 100644
index 00000000..a1c4dca0
--- /dev/null
+++ b/vendor/github.com/cpuguy83/dockercfg/load.go
@@ -0,0 +1,55 @@
+package dockercfg
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+)
+
+// UserHomeConfigPath returns the path to the docker config in the current user's home dir.
+func UserHomeConfigPath() (string, error) {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return "", fmt.Errorf("user home dir: %w", err)
+ }
+
+ return filepath.Join(home, ".docker", "config.json"), nil
+}
+
+// ConfigPath returns the path to the docker cli config.
+//
+// It will either use the DOCKER_CONFIG env var if set, or the value from [UserHomeConfigPath].
+// DOCKER_CONFIG is the directory where `config.json` is stored; this returns the path to config.json.
+func ConfigPath() (string, error) {
+ if p := os.Getenv("DOCKER_CONFIG"); p != "" {
+ return filepath.Join(p, "config.json"), nil
+ }
+ return UserHomeConfigPath()
+}
+
+// LoadDefaultConfig loads the docker cli config from the path returned from [ConfigPath].
+func LoadDefaultConfig() (Config, error) {
+ var cfg Config
+ p, err := ConfigPath()
+ if err != nil {
+ return cfg, fmt.Errorf("config path: %w", err)
+ }
+
+ return cfg, FromFile(p, &cfg)
+}
+
+// FromFile loads config from the specified path into cfg.
+func FromFile(configPath string, cfg *Config) error {
+ f, err := os.Open(configPath)
+ if err != nil {
+ return fmt.Errorf("open config: %w", err)
+ }
+ defer f.Close()
+
+ if err = json.NewDecoder(f).Decode(&cfg); err != nil {
+ return fmt.Errorf("decode config: %w", err)
+ }
+
+ return nil
+}
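A small sketch of the config-loading path, using only the functions defined in this file; on a machine without a Docker config, `FromFile` is expected to fail with a file-not-found style error.

```go
package main

import (
	"fmt"

	"github.com/cpuguy83/dockercfg"
)

func main() {
	// ConfigPath honors DOCKER_CONFIG and falls back to ~/.docker/config.json.
	p, err := dockercfg.ConfigPath()
	if err != nil {
		panic(err)
	}
	fmt.Println("config path:", p)

	var cfg dockercfg.Config
	if err := dockercfg.FromFile(p, &cfg); err != nil {
		// A missing config file is a normal condition on a fresh machine.
		fmt.Println("could not load config:", err)
		return
	}
	fmt.Println("registries with stored auth:", len(cfg.AuthConfigs))
}
```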
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 00000000..bc52e96f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 00000000..79299478
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces, which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
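
For context on why bypass.go exists: the reflect package refuses to hand back an interface for unexported struct fields, so spew could not invoke Stringer/error methods on them without clearing the read-only flag as done above. A minimal standalone sketch of the restriction being worked around (the demo type and field names are purely illustrative):

package main

import (
	"fmt"
	"reflect"
)

type demo struct {
	hidden fmt.Stringer // unexported: reflect enforces visibility rules here
}

func main() {
	v := reflect.ValueOf(demo{}).Field(0)

	// CanInterface and CanAddr both report false for an unexported,
	// non-addressable field. This is the case unsafeReflectValue handles by
	// clearing flagRO and setting flagAddr on the value's flag word.
	fmt.Println(v.CanInterface(), v.CanAddr()) // false false
}
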
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 00000000..205c28d6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 00000000..1be8ce94
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+ // mutate the value, however, types which choose to satisify an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+ strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valueSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
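
The sortValues/newValuesSorter machinery above is what backs the user-facing SortKeys option. A small sketch of its visible effect, assuming the vendored module path shown in this diff (output shape approximate):

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"beta": 2, "alpha": 1, "gamma": 3}

	// With SortKeys enabled, map keys are run through sortValues before
	// printing, so repeated dumps of the same map diff cleanly.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	cfg.Dump(m)
	// alpha, beta, gamma appear in that order; without SortKeys the order
	// follows Go's randomized map iteration.
}
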
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 00000000..2e3d22f3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+ // global config instance that all top-level functions use set this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+ // with a pointer receiver could technically mutate the value, however,
+ // in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns the
+// number of bytes written and any write error encountered. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
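
A short sketch of how a dedicated ConfigState is typically used instead of mutating the global spew.Config; the struct and field values here are just one plausible test-oriented setup:

package main

import "github.com/davecgh/go-spew/spew"

type user struct {
	Name  string
	Roles []string
}

func main() {
	// NewDefaultConfig returns an independent ConfigState, so these tweaks
	// don't leak into other code that relies on the global spew.Config.
	cfg := spew.NewDefaultConfig()
	cfg.DisablePointerAddresses = true // addresses change run to run; drop them for diffable output
	cfg.DisableCapacities = true       // likewise omit cap= noise on slices and maps
	cfg.SortKeys = true

	cfg.Dump(user{Name: "alice", Roles: []string{"admin", "reader"}})
}
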
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 00000000..aacaac6f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
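
The Errors paragraph above is easy to miss, so here is a tiny sketch of the behavior it describes; the type name and panic message are made up for illustration:

package main

import "github.com/davecgh/go-spew/spew"

type flaky int

// String panics on purpose to show that spew recovers the panic and reports
// it inline rather than letting it escape to the caller.
func (flaky) String() string { panic("boom") }

func main() {
	spew.Dump(flaky(1)) // prints roughly: (main.flaky) (PANIC=boom)1
}
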
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 00000000..f78d89fc
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 00000000..b04edb7d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
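
For context, a minimal usage sketch of the formatter defined above (illustrative only; not part of the vendored file, and it assumes the package is consumed via its public import path):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type point struct{ X, Y int }

func main() {
	p := &point{X: 1, Y: 2}

	// NewFormatter wraps a value so the %v, %+v, %#v, and %#+v verbs are
	// handled by spew's formatState; other verbs fall back to fmt.
	fmt.Printf("%v\n", spew.NewFormatter(p))  // e.g. <*>{1 2}
	fmt.Printf("%+v\n", spew.NewFormatter(p)) // also shows the pointer address
	fmt.Printf("%#v\n", spew.NewFormatter(p)) // also shows type information
}
```
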
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..32c0e338
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
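
All of the wrappers above route their arguments through convertArgs, so only the argument formatting changes; the format string and return values are fmt's own. A short sketch (illustrative only):

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Each argument is wrapped by NewFormatter, so %v uses spew's compact
	// formatting instead of fmt's default for maps, pointers, etc.
	spew.Printf("m = %v\n", m)

	// Sprintf returns the formatted string instead of writing it.
	s := spew.Sprintf("slice = %v", []int{1, 2, 3})
	_ = s
}
```
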
diff --git a/vendor/github.com/distribution/reference/.gitattributes b/vendor/github.com/distribution/reference/.gitattributes
new file mode 100644
index 00000000..d207b180
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.gitattributes
@@ -0,0 +1 @@
+*.go text eol=lf
diff --git a/vendor/github.com/distribution/reference/.gitignore b/vendor/github.com/distribution/reference/.gitignore
new file mode 100644
index 00000000..dc07e6b0
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.gitignore
@@ -0,0 +1,2 @@
+# Cover profiles
+*.out
diff --git a/vendor/github.com/distribution/reference/.golangci.yml b/vendor/github.com/distribution/reference/.golangci.yml
new file mode 100644
index 00000000..793f0bb7
--- /dev/null
+++ b/vendor/github.com/distribution/reference/.golangci.yml
@@ -0,0 +1,18 @@
+linters:
+ enable:
+ - bodyclose
+ - dupword # Checks for duplicate words in the source code
+ - gofmt
+ - goimports
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - unconvert
+ - unused
+ - vet
+ disable:
+ - errcheck
+
+run:
+ deadline: 2m
diff --git a/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md
new file mode 100644
index 00000000..48f6704c
--- /dev/null
+++ b/vendor/github.com/distribution/reference/CODE-OF-CONDUCT.md
@@ -0,0 +1,5 @@
+# Code of Conduct
+
+We follow the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
+
+Please contact the [CNCF Code of Conduct Committee](mailto:conduct@cncf.io) in order to report violations of the Code of Conduct.
diff --git a/vendor/github.com/distribution/reference/CONTRIBUTING.md b/vendor/github.com/distribution/reference/CONTRIBUTING.md
new file mode 100644
index 00000000..ab219466
--- /dev/null
+++ b/vendor/github.com/distribution/reference/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# Contributing to the reference library
+
+## Community help
+
+If you need help, please ask in the [#distribution](https://cloud-native.slack.com/archives/C01GVR8SY4R) channel on CNCF community slack.
+[Click here for an invite to the CNCF community slack](https://slack.cncf.io/)
+
+## Reporting security issues
+
+The maintainers take security seriously. If you discover a security
+issue, please bring it to their attention right away!
+
+Please **DO NOT** file a public issue, instead send your report privately to
+[cncf-distribution-security@lists.cncf.io](mailto:cncf-distribution-security@lists.cncf.io).
+
+## Reporting an issue properly
+
+By following these simple rules you will get better and faster feedback on your issue.
+
+ - search the bugtracker for an already reported issue
+
+### If you found an issue that describes your problem:
+
+ - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
+ - please refrain from adding "same thing here" or "+1" comments
+ - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
+ - comment if you have some new, technical and relevant information to add to the case
+ - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
+
+### If you have not found an existing issue that describes your problem:
+
+ 1. create a new issue, with a succinct title that describes your issue:
+ - bad title: "It doesn't work with my docker"
+ - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
+ 2. copy the output of (or similar for other container tools):
+ - `docker version`
+ - `docker info`
+ - `docker exec registry --version`
+ 3. copy the command line you used to launch your Registry
+ 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
+ 5. reproduce your problem and get your docker daemon logs showing the error
+ 6. if relevant, copy your registry logs that show the error
+ 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
+ 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
+
+## Contributing Code
+
+Contributions should be made via pull requests. Pull requests will be reviewed
+by one or more maintainers or reviewers and merged when acceptable.
+
+You should follow the basic GitHub workflow:
+
+ 1. Use your own [fork](https://help.github.com/en/articles/about-forks)
+ 2. Create your [change](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
+ 3. Test your code
+ 4. [Commit](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages) your work, always [sign your commits](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#commit-messages)
+ 5. Push your change to your fork and create a [Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
+
+Refer to [containerd's contribution guide](https://github.com/containerd/project/blob/master/CONTRIBUTING.md#successful-changes)
+for tips on creating a successful contribution.
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/distribution/reference/GOVERNANCE.md b/vendor/github.com/distribution/reference/GOVERNANCE.md
new file mode 100644
index 00000000..200045b0
--- /dev/null
+++ b/vendor/github.com/distribution/reference/GOVERNANCE.md
@@ -0,0 +1,144 @@
+# distribution/reference Project Governance
+
+Distribution [Code of Conduct](./CODE-OF-CONDUCT.md) can be found here.
+
+For specific guidance on practical contribution steps please
+see our [CONTRIBUTING.md](./CONTRIBUTING.md) guide.
+
+## Maintainership
+
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+1) They share responsibility in the project's success.
+2) They have made a long-term, recurring time investment to improve the project.
+3) They spend that time doing whatever needs to be done, not necessarily what
+is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is harder to appreciate.
+It's easy to appreciate a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+
+## Reviewers
+
+A reviewer is a core role within the project.
+They share in reviewing issues and pull requests and their LGTM counts towards the
+required LGTM count to merge a code change into the project.
+
+Reviewers are part of the organization but do not have write access.
+Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
+
+## Adding maintainers
+
+Maintainers are first and foremost contributors that have shown they are
+committed to the long term success of a project. Contributors wanting to become
+maintainers are expected to be deeply involved in contributing code, pull
+request review, and triage of issues in the project for more than three months.
+
+Just contributing does not make you a maintainer, it is about building trust
+with the current maintainers of the project and being a person that they can
+depend on and trust to make decisions in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors that have
+shown regular activity on the project over the prior months. From this list,
+maintainer candidates are selected and proposed in a pull request or a
+maintainers communication channel.
+
+After a candidate has been announced to the maintainers, the existing
+maintainers are given five business days to discuss the candidate, raise
+objections and cast their vote. Votes may take place on the communication
+channel or via pull request comment. Candidates must be approved by at least 66%
+of the current maintainers by adding their vote on the mailing list. The
+reviewer role has the same process but only requires 33% of current maintainers.
+Only maintainers of the repository that the candidate is proposed for are
+allowed to vote.
+
+If a candidate is approved, a maintainer will contact the candidate to invite
+the candidate to open a pull request that adds the contributor to the
+MAINTAINERS file. The voting process may take place inside a pull request if a
+maintainer has already discussed the candidacy with the candidate and a
+maintainer is willing to be a sponsor by opening the pull request. The candidate
+becomes a maintainer once the pull request is merged.
+
+## Stepping down policy
+
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+
+## Removal of inactive maintainers
+
+Similar to the procedure for adding new maintainers, existing maintainers can
+be removed from the list if they do not show significant activity on the
+project. Periodically, the maintainers review the list of maintainers and their
+activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a neutral
+person will contact the maintainer to ask if they want to continue being
+a maintainer. If the maintainer decides to step down as a maintainer, they
+open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to remain a maintainer, but is unable to perform the
+required duties they can be removed with a vote of at least 66% of the current
+maintainers. In this case, maintainers should first propose the change to
+maintainers via the maintainers communication channel, then open a pull request
+for voting. The voting period is five business days. The voting pull request
+should not come as a surprise to any maintainer, and any discussion related to
+performance must take place outside the pull request.
+
+## How are decisions made?
+
+Docker distribution is an open-source project with an open design philosophy.
+This means that the repository is the source of truth for EVERY aspect of the
+project, including its philosophy, design, road map, and APIs. *If it's part of
+the project, it's in the repo. If it's in the repo, it's part of the project.*
+
+As a result, all decisions can be expressed as changes to the repository. An
+implementation change is a change to the source code. An API change is a change
+to the API specification. A philosophy change is a change to the philosophy
+manifesto, and so on.
+
+All decisions affecting distribution, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Merge or refuse the pull request. Who does this depends on the nature
+of the pull request and which areas of the project it affects.
+
+## Helping contributors with the DCO
+
+The [DCO or `Sign your work`](./CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some contributors are not as familiar with `git`, or have used a web
+based editor, and thus asking them to `git commit --amend -s` is not the best
+way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO.
+The most trivial way for a contributor to allow the maintainer to do this, is to
+add a DCO signature in a pull request's comment, or a maintainer can simply
+note that the change is sufficiently trivial that it does not substantially
+change the existing contribution - i.e., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+
+## I'm a maintainer. Should I make pull requests too?
+
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+
+## Conflict Resolution
+
+If you have a technical dispute that you feel has reached an impasse with a
+subset of the community, any contributor may open an issue, specifically
+calling for a resolution vote of the current core maintainers to resolve the
+dispute. The same voting quorums required (2/3) for adding and removing
+maintainers will apply to conflict resolution.
diff --git a/vendor/github.com/distribution/reference/LICENSE b/vendor/github.com/distribution/reference/LICENSE
new file mode 100644
index 00000000..e06d2081
--- /dev/null
+++ b/vendor/github.com/distribution/reference/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/distribution/reference/MAINTAINERS b/vendor/github.com/distribution/reference/MAINTAINERS
new file mode 100644
index 00000000..9e0a60c8
--- /dev/null
+++ b/vendor/github.com/distribution/reference/MAINTAINERS
@@ -0,0 +1,26 @@
+# Distribution project maintainers & reviewers
+#
+# See GOVERNANCE.md for maintainer versus reviewer roles
+#
+# MAINTAINERS (cncf-distribution-maintainers@lists.cncf.io)
+# GitHub ID, Name, Email address
+"chrispat","Chris Patterson","chrispat@github.com"
+"clarkbw","Bryan Clark","clarkbw@github.com"
+"corhere","Cory Snider","csnider@mirantis.com"
+"deleteriousEffect","Hayley Swimelar","hswimelar@gitlab.com"
+"heww","He Weiwei","hweiwei@vmware.com"
+"joaodrp","João Pereira","jpereira@gitlab.com"
+"justincormack","Justin Cormack","justin.cormack@docker.com"
+"squizzi","Kyle Squizzato","ksquizzato@mirantis.com"
+"milosgajdos","Milos Gajdos","milosthegajdos@gmail.com"
+"sargun","Sargun Dhillon","sargun@sargun.me"
+"wy65701436","Wang Yan","wangyan@vmware.com"
+"stevelasker","Steve Lasker","steve.lasker@microsoft.com"
+#
+# REVIEWERS
+# GitHub ID, Name, Email address
+"dmcgowan","Derek McGowan","derek@mcgstyle.net"
+"stevvooe","Stephen Day","stevvooe@gmail.com"
+"thajeztah","Sebastiaan van Stijn","github@gone.nl"
+"DavidSpek", "David van der Spek", "vanderspek.david@gmail.com"
+"Jamstah", "James Hewitt", "james.hewitt@gmail.com"
diff --git a/vendor/github.com/distribution/reference/Makefile b/vendor/github.com/distribution/reference/Makefile
new file mode 100644
index 00000000..c78576b7
--- /dev/null
+++ b/vendor/github.com/distribution/reference/Makefile
@@ -0,0 +1,25 @@
+# Project packages.
+PACKAGES=$(shell go list ./...)
+
+# Flags passed to `go test`
+BUILDFLAGS ?=
+TESTFLAGS ?=
+
+.PHONY: all build test coverage
+.DEFAULT: all
+
+all: build
+
+build: ## no binaries to build, so just check compilation succeeds
+ go build ${BUILDFLAGS} ./...
+
+test: ## run tests
+ go test ${TESTFLAGS} ./...
+
+coverage: ## generate coverprofiles from the unit tests
+ rm -f coverage.txt
+ go test ${TESTFLAGS} -cover -coverprofile=cover.out ./...
+
+.PHONY: help
+help:
+ @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_\/%-]+:.*?##/ { printf " \033[36m%-27s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md
new file mode 100644
index 00000000..172a02e0
--- /dev/null
+++ b/vendor/github.com/distribution/reference/README.md
@@ -0,0 +1,30 @@
+# Distribution reference
+
+Go library to handle references to container images.
+
+
+
+[CI](https://github.com/distribution/reference/actions?query=workflow%3ACI)
+[Go Reference](https://pkg.go.dev/github.com/distribution/reference)
+[License](LICENSE)
+[Codecov](https://codecov.io/gh/distribution/reference)
+[FOSSA Status](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)
+
+This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
+
+## Contribution
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project.
+
+## Communication
+
+For async communication and long running discussions please use issues and pull requests on the github repo.
+This will be the best place to discuss design and implementation.
+
+For sync communication we have a #distribution channel in the [CNCF Slack](https://slack.cncf.io/)
+that everyone is welcome to join and chat about development.
+
+## Licenses
+
+The distribution codebase is released under the [Apache 2.0 license](LICENSE).
diff --git a/vendor/github.com/distribution/reference/SECURITY.md b/vendor/github.com/distribution/reference/SECURITY.md
new file mode 100644
index 00000000..aaf983c0
--- /dev/null
+++ b/vendor/github.com/distribution/reference/SECURITY.md
@@ -0,0 +1,7 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+The maintainers take security seriously. If you discover a security issue, please bring it to their attention right away!
+
+Please DO NOT file a public issue, instead send your report privately to cncf-distribution-security@lists.cncf.io.
diff --git a/vendor/github.com/distribution/reference/distribution-logo.svg b/vendor/github.com/distribution/reference/distribution-logo.svg
new file mode 100644
index 00000000..cc9f4073
--- /dev/null
+++ b/vendor/github.com/distribution/reference/distribution-logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/vendor/github.com/distribution/reference/helpers.go b/vendor/github.com/distribution/reference/helpers.go
new file mode 100644
index 00000000..d10c7ef8
--- /dev/null
+++ b/vendor/github.com/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See [path.Match] for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
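
For context, a small sketch of the helpers above (illustrative only; the expected values follow from the normalization rules in normalize.go, also vendored in this change):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("docker.io/library/redis")
	if err != nil {
		panic(err)
	}

	// FamiliarName strips the default "docker.io" domain and "library/" prefix.
	fmt.Println(reference.FamiliarName(named)) // redis

	// FamiliarMatch applies a path.Match pattern to the familiar form.
	ok, err := reference.FamiliarMatch("red*", named)
	fmt.Println(ok, err) // true <nil>
}
```
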
diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go
new file mode 100644
index 00000000..f4128314
--- /dev/null
+++ b/vendor/github.com/distribution/reference/normalize.go
@@ -0,0 +1,255 @@
+package reference
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // legacyDefaultDomain is the legacy domain for Docker Hub (which was
+ // originally named "the Docker Index"). This domain is still used for
+ // authentication and image search, which were part of the "v1" Docker
+ // registry specification.
+ //
+ // This domain will continue to be supported, but there are plans to consolidate
+ // legacy domains to new "canonical" domains. Once those domains are decided
+ // on, we must update the normalization functions, but preserve compatibility
+ // with existing installs, clients, and user configuration.
+ legacyDefaultDomain = "index.docker.io"
+
+ // defaultDomain is the default domain used for images on Docker Hub.
+ // It is used to normalize "familiar" names to canonical names, for example,
+ // to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+ //
+ // Note that actual domain of Docker Hub's registry is registry-1.docker.io.
+ // This domain will continue to be supported, but there are plans to consolidate
+ // legacy domains to new "canonical" domains. Once those domains are decided
+ // on, we must update the normalization functions, but preserve compatibility
+ // with existing installs, clients, and user configuration.
+ defaultDomain = "docker.io"
+
+ // officialRepoPrefix is the namespace used for official images on Docker Hub.
+ // It is used to normalize "familiar" names to canonical names, for example,
+ // to convert "ubuntu" to "docker.io/library/ubuntu:latest".
+ officialRepoPrefix = "library/"
+
+ // defaultTag is the default tag if no tag is provided.
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remote string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remote = remainder[:tagSep]
+ } else {
+ remote = remainder
+ }
+ if strings.ToLower(remote) != remote {
+ return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remote)
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// namedTaggedDigested is a reference that has both a tag and a digest.
+type namedTaggedDigested interface {
+ NamedTagged
+ Digested
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention,
+// which allows for references to contain both a tag and a digest. It returns a
+// reference that is either tagged or digested. For references containing both
+// a tag and a digest, it returns a digested reference. For example, the following
+// reference:
+//
+// docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// Is returned as a digested reference (with the ":latest" tag removed):
+//
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// References that are already "tagged" or "digested" are returned unmodified:
+//
+// // Already a digested reference
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
+//
+// // Already a named reference
+// docker.io/library/busybox:latest
+func ParseDockerRef(ref string) (Named, error) {
+ named, err := ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, err
+ }
+ if canonical, ok := named.(namedTaggedDigested); ok {
+ // The reference is both tagged and digested; only return digested.
+ newNamed, err := WithName(canonical.Name())
+ if err != nil {
+ return nil, err
+ }
+ return WithDigest(newNamed, canonical.Digest())
+ }
+ return TagNameOnly(named), nil
+}
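
A minimal sketch of the tag-plus-digest behaviour described in the doc comment above (illustrative only; the digest is the example value from that comment):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	in := "docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa"

	named, err := reference.ParseDockerRef(in)
	if err != nil {
		panic(err)
	}

	// When both a tag and a digest are present, the tag is dropped and a
	// digested reference is returned.
	fmt.Println(named.String())
	// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa
}
```
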
+
+// splitDockerDomain splits a repository name to domain and remote-name.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remoteName string) {
+ maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/")
+ if !ok {
+ // Fast-path for single element ("familiar" names), such as "ubuntu"
+ // or "ubuntu:latest". Familiar names must be handled separately, to
+ // prevent them from being handled as "hostname:port".
+ //
+ // Canonicalize them as "docker.io/library/name[:tag]"
+
+ // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain.
+ return defaultDomain, officialRepoPrefix + name
+ }
+
+ switch {
+ case maybeDomain == localhost:
+ // localhost is a reserved namespace and always considered a domain.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case maybeDomain == legacyDefaultDomain:
+ // canonicalize the Docker Hub and legacy "Docker Index" domains.
+ domain, remoteName = defaultDomain, maybeRemoteName
+ case strings.ContainsAny(maybeDomain, ".:"):
+ // Likely a domain or IP-address:
+ //
+ // - contains a "." (e.g., "example.com" or "127.0.0.1")
+ // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000")
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case strings.ToLower(maybeDomain) != maybeDomain:
+ // Uppercase namespaces are not allowed, so if the first element
+ // is not lowercase, we assume it to be a domain-name.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ default:
+ // None of the above: it's not a domain, so use the default, and
+ // use the name input as the remote-name.
+ domain, remoteName = defaultDomain, name
+ }
+
+ if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') {
+ // Canonicalize "familiar" names, but only on Docker Hub, not
+ // on other domains:
+ //
+ // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]"
+ remoteName = officialRepoPrefix + remoteName
+ }
+
+ return domain, remoteName
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/"
+ if strings.HasPrefix(repo.path, officialRepoPrefix) {
+ // TODO(thaJeztah): this check may be too strict, as it assumes the
+ // "library/" namespace does not have nested namespaces. While this
+ // is true (currently), technically it would be possible for Docker
+ // Hub to use those (e.g. "library/distros/ubuntu:latest").
+ // See https://github.com/distribution/distribution/pull/3769#issuecomment-1302031785.
+ if remainder := strings.TrimPrefix(repo.path, officialRepoPrefix); !strings.ContainsRune(remainder, '/') {
+ repo.path = remainder
+ }
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
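
ParseAnyReference accepts a bare identifier, a full digest, or anything ParseNormalizedNamed accepts; a short sketch (illustrative only, reusing the 64-hex digest value from the ParseDockerRef doc comment above):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// A bare 64-character hex identifier is treated as a sha256 digest.
	r1, err := reference.ParseAnyReference("7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(r1.String()) // printed with a "sha256:" prefix

	// Everything else falls through to ParseNormalizedNamed.
	r2, err := reference.ParseAnyReference("ubuntu:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(r2.String()) // docker.io/library/ubuntu:latest
}
```
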
diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go
new file mode 100644
index 00000000..900398bd
--- /dev/null
+++ b/vendor/github.com/distribution/reference/reference.go
@@ -0,0 +1,432 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] remote-name
+// domain := host [':' port-number]
+// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A
+// domain-name := domain-component ['.' domain-component]*
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// path (or "remote-name") := path-component ['/' path-component]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name.
+ RepositoryNameTotalLengthMax = 255
+
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ //
+ // Deprecated: use [RepositoryNameTotalLengthMax] instead.
+ NameTotalLengthMax = RepositoryNameTotalLengthMax
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique name,
+// consisting of a domain-qualified name and a digest.
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the [Named] reference.
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the [Named] reference.
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+// splitDomain splits a named reference into a hostname and path string.
+// If no valid hostname is found, the hostname is empty and the full value
+// is returned as name
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ if len(repo.path) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(match[2]) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ repo := repository{}
+ if r, ok := ref.(namedRepository); ok {
+ repo.domain, repo.path = r.Domain(), r.Path()
+ } else {
+ repo.domain, repo.path = splitDomain(ref.Name())
+ }
+ return repo
+}
+
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Tag() string {
+ return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+type repository struct {
+ domain string
+ path string
+}
+
+func (r repository) String() string {
+ return r.Name()
+}
+
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+func (r repository) Domain() string {
+ return r.domain
+}
+
+func (r repository) Path() string {
+ return r.path
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
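Parse, WithTag, and WithDigest above return progressively richer reference types (Named, Tagged, Canonical) depending on what the input carries, and Domain/Path split the name component. The following is a minimal usage sketch of this vendored package, not part of the vendored file itself; the image string and the program are illustrative assumptions only:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Parse picks the most specific reference type the input supports;
	// here the result is a tagged, named reference (hypothetical image).
	ref, err := reference.Parse("localhost:5000/team/app:v1.2")
	if err != nil {
		panic(err)
	}

	if named, ok := ref.(reference.Named); ok {
		fmt.Println("name:  ", named.Name())            // localhost:5000/team/app
		fmt.Println("domain:", reference.Domain(named)) // localhost:5000
		fmt.Println("path:  ", reference.Path(named))   // team/app
	}
	if tagged, ok := ref.(reference.Tagged); ok {
		fmt.Println("tag:   ", tagged.Tag()) // v1.2
	}
}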
diff --git a/vendor/github.com/distribution/reference/regexp.go b/vendor/github.com/distribution/reference/regexp.go
new file mode 100644
index 00000000..65bc49d7
--- /dev/null
+++ b/vendor/github.com/distribution/reference/regexp.go
@@ -0,0 +1,163 @@
+package reference
+
+import (
+ "regexp"
+ "strings"
+)
+
+// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:").
+var DigestRegexp = regexp.MustCompile(digestPat)
+
+// DomainRegexp matches hostname or IP-addresses, optionally including a port
+// number. It defines the structure of potential domain components that may be
+// part of image names. This is purposely a subset of what is allowed by DNS to
+// ensure backwards compatibility with Docker image names. It may be a subset of
+// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
+// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
+// addresses such as IPv4-Mapped).
+//
+// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
+var DomainRegexp = regexp.MustCompile(domainAndPort)
+
+// IdentifierRegexp is the format for string identifier used as a
+// content addressable identifier using sha256. These identifiers
+// are like digests without the algorithm, since sha256 is used.
+var IdentifierRegexp = regexp.MustCompile(identifier)
+
+// NameRegexp is the format for the name component of references, including
+// an optional domain and port, but without tag or digest suffix.
+var NameRegexp = regexp.MustCompile(namePat)
+
+// ReferenceRegexp is the full supported format of a reference. The regexp
+// is anchored and has capturing groups for name, tag, and digest
+// components.
+var ReferenceRegexp = regexp.MustCompile(referencePat)
+
+// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
+//
+// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
+var TagRegexp = regexp.MustCompile(tag)
+
+const (
+ // alphanumeric defines the alphanumeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphanumeric = `[a-z0-9]+`
+
+ // separator defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores, or multiple
+ // dashes. Repeated dashes and underscores are intentionally treated
+ // differently. In order to support valid hostnames as name components,
+ // support for repeated dashes was added. Additionally, a double underscore
+ // is now allowed as a separator to loosen the restriction for previously
+ // supported names.
+ separator = `(?:[._]|__|[-]+)`
+
+ // localhost is treated as a special value for domain-name. Any other
+ // domain-name without a "." or a ":port" is considered a path component.
+ localhost = `localhost`
+
+ // domainNameComponent restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp.
+ domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`
+
+ // optionalPort matches an optional port-number including the port separator
+ // (e.g. ":80").
+ optionalPort = `(?::[0-9]+)?`
+
+ // tag matches valid tag names. From docker/docker:graph/tags.go.
+ tag = `[\w][\w.-]{0,127}`
+
+ // digestPat matches well-formed digests, including algorithm (e.g. "sha256:").
+ //
+ // TODO(thaJeztah): this should follow the same rules as https://pkg.go.dev/github.com/opencontainers/go-digest@v1.0.0#DigestRegexp
+ // so that go-digest defines the canonical format. Note that the go-digest is
+ // more relaxed:
+ // - it allows multiple algorithms (e.g. "sha256+b64:") to allow
+ // future expansion of supported algorithms.
+ // - it allows the encoded value to use urlsafe base64 encoding as defined
+ // in [rfc4648, section 5].
+ //
+ // [rfc4648, section 5]: https://www.rfc-editor.org/rfc/rfc4648#section-5.
+ digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`
+
+ // identifier is the format for a content addressable identifier using sha256.
+ // These identifiers are like digests without the algorithm, since sha256 is used.
+ identifier = `([a-f0-9]{64})`
+
+ // ipv6address matches an IPv6 address enclosed between square brackets. It may
+ // be represented in many ways, see rfc5952. Only IPv6 in compressed or
+ // uncompressed format is allowed; IPv6 zone identifiers (rfc6874) and special
+ // addresses such as IPv4-Mapped are deliberately excluded.
+ ipv6address = `\[(?:[a-fA-F0-9:]+)\]`
+)
+
+var (
+ // domainName defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names. This includes IPv4 addresses in decimal format.
+ domainName = domainNameComponent + anyTimes(`\.`+domainNameComponent)
+
+ // host defines the structure of potential domains based on the URI
+ // Host subcomponent on rfc3986. It may be a subset of DNS domain name,
+ // or an IPv4 address in decimal format, or an IPv6 address between square
+ // brackets (excluding zone identifiers as defined by rfc6874 or special
+ // addresses such as IPv4-Mapped).
+ host = `(?:` + domainName + `|` + ipv6address + `)`
+
+ // domainAndPort defines a host and an optional port, restricted to what is
+ // allowed by the URI Host subcomponent on rfc3986 to ensure backwards
+ // compatibility with Docker image names.
+ domainAndPort = host + optionalPort
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = regexp.MustCompile(anchored(tag))
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = regexp.MustCompile(anchored(digestPat))
+
+ // pathComponent restricts path-components to start with an alphanumeric
+ // character, with following parts able to be separated by a separator
+ // (one period, one or two underscores, or multiple dashes).
+ pathComponent = alphanumeric + anyTimes(separator+alphanumeric)
+
+ // remoteName matches the remote-name of a repository. It consists of one
+ // or more forward slash (/) delimited path-components:
+ //
+ // pathComponent[[/pathComponent] ...] // e.g., "library/ubuntu"
+ remoteName = pathComponent + anyTimes(`/`+pathComponent)
+ namePat = optional(domainAndPort+`/`) + remoteName
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = regexp.MustCompile(anchored(optional(capture(domainAndPort), `/`), capture(remoteName)))
+
+ referencePat = anchored(capture(namePat), optional(`:`, capture(tag)), optional(`@`, capture(digestPat)))
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = regexp.MustCompile(anchored(identifier))
+)
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...string) string {
+ return `(?:` + strings.Join(res, "") + `)?`
+}
+
+// anyTimes wraps the expression in a non-capturing group that can occur
+// any number of times.
+func anyTimes(res ...string) string {
+ return `(?:` + strings.Join(res, "") + `)*`
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...string) string {
+ return `(` + strings.Join(res, "") + `)`
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...string) string {
+ return `^` + strings.Join(res, "") + `$`
+}
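The optional, anyTimes, capture, and anchored helpers above compose referencePat, whose three capture groups are what Parse consumes as the name, tag, and digest components. A short sketch, not part of the vendored file, using a hypothetical reference string:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	// Hypothetical reference; the digest hex is the sha256 of empty input.
	s := "registry.example.com:5000/team/app:v1.2@sha256:" +
		"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	m := reference.ReferenceRegexp.FindStringSubmatch(s)
	if m == nil {
		panic("no match")
	}
	fmt.Println("name:  ", m[1]) // registry.example.com:5000/team/app
	fmt.Println("tag:   ", m[2]) // v1.2
	fmt.Println("digest:", m[3]) // the full sha256 digest
}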
diff --git a/vendor/github.com/distribution/reference/sort.go b/vendor/github.com/distribution/reference/sort.go
new file mode 100644
index 00000000..416c37b0
--- /dev/null
+++ b/vendor/github.com/distribution/reference/sort.go
@@ -0,0 +1,75 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package reference
+
+import (
+ "sort"
+)
+
+// Sort sorts string references preferring higher information references.
+//
+// The precedence is as follows:
+//
+// 1. [Named] + [Tagged] + [Digested] (e.g., "docker.io/library/busybox:latest@sha256:<digest>")
+// 2. [Named] + [Tagged] (e.g., "docker.io/library/busybox:latest")
+// 3. [Named] + [Digested] (e.g., "docker.io/library/busybox@sha256:<digest>")
+// 4. [Named] (e.g., "docker.io/library/busybox")
+// 5. [Digested] (e.g., "docker.io@sha256:<digest>")
+// 6. Parse error
+func Sort(references []string) []string {
+ var prefs []Reference
+ var bad []string
+
+ for _, ref := range references {
+ pref, err := ParseAnyReference(ref)
+ if err != nil {
+ bad = append(bad, ref)
+ } else {
+ prefs = append(prefs, pref)
+ }
+ }
+ sort.Slice(prefs, func(a, b int) bool {
+ ar := refRank(prefs[a])
+ br := refRank(prefs[b])
+ if ar == br {
+ return prefs[a].String() < prefs[b].String()
+ }
+ return ar < br
+ })
+ sort.Strings(bad)
+ var refs []string
+ for _, pref := range prefs {
+ refs = append(refs, pref.String())
+ }
+ return append(refs, bad...)
+}
+
+func refRank(ref Reference) uint8 {
+ if _, ok := ref.(Named); ok {
+ if _, ok = ref.(Tagged); ok {
+ if _, ok = ref.(Digested); ok {
+ return 1
+ }
+ return 2
+ }
+ if _, ok = ref.(Digested); ok {
+ return 3
+ }
+ return 4
+ }
+ return 5
+}
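Sort ranks parseable references by how much information they carry, following the precedence documented above, and appends unparsable inputs at the end. A brief sketch, not part of the vendored file, using hypothetical inputs:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ordered := reference.Sort([]string{
		"docker.io/library/busybox",        // Named only     -> rank 4
		"docker.io/library/busybox:latest", // Named + Tagged -> rank 2
		"not a valid reference!!",          // parse error    -> sorted last
	})
	for _, r := range ordered {
		fmt.Println(r)
	}
	// Expected order:
	//   docker.io/library/busybox:latest
	//   docker.io/library/busybox
	//   not a valid reference!!
}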
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
new file mode 100644
index 00000000..88032def
--- /dev/null
+++ b/vendor/github.com/docker/docker/AUTHORS
@@ -0,0 +1,2486 @@
+# File @generated by hack/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See hack/generate-authors.sh to make modifications.
+
+7sunarni <710720732@qq.com>
+Aanand Prasad
+Aarni Koskela
+Aaron Davidson
+Aaron Feng
+Aaron Hnatiw
+Aaron Huslage
+Aaron L. Xu
+Aaron Lehmann
+Aaron Welch
+Aaron Yoshitake
+Abdur Rehman
+Abel Muiño
+Abhijeet Kasurde
+Abhinandan Prativadi
+Abhinav Ajgaonkar
+Abhishek Chanda
+Abhishek Sharma
+Abin Shahab
+Abirdcfly
+Ada Mancini
+Adam Avilla
+Adam Dobrawy
+Adam Eijdenberg
+Adam Kunk
+Adam Lamers
+Adam Miller
+Adam Mills
+Adam Pointer
+Adam Simon
+Adam Singer
+Adam Thornton
+Adam Walz
+Adam Williams
+AdamKorcz
+Addam Hardy
+Aditi Rajagopal
+Aditya
+Adnan Khan
+Adolfo Ochagavía
+Adria Casas
+Adrian Moisey
+Adrian Mouat
+Adrian Oprea
+Adrien Folie
+Adrien Gallouët
+Ahmed Kamal
+Ahmet Alp Balkan
+Aidan Feldman
+Aidan Hobson Sayers
+AJ Bowen
+Ajey Charantimath
+ajneu
+Akash Gupta
+Akhil Mohan
+Akihiro Matsushima
+Akihiro Suda
+Akim Demaille
+Akira Koyasu
+Akshay Karle
+Akshay Moghe
+Al Tobey
+alambike
+Alan Hoyle
+Alan Scherger
+Alan Thompson
+Alano Terblanche
+Albert Callarisa
+Albert Zhang
+Albin Kerouanton
+Alec Benson
+Alejandro González Hevia
+Aleksa Sarai
+Aleksandr Chebotov
+Aleksandrs Fadins
+Alena Prokharchyk
+Alessandro Boch
+Alessio Biancalana
+Alex Chan
+Alex Chen
+Alex Coventry
+Alex Crawford
+Alex Ellis
+Alex Gaynor
+Alex Goodman
+Alex Nordlund
+Alex Olshansky
+Alex Samorukov
+Alex Stockinger
+Alex Warhawk
+Alexander Artemenko
+Alexander Boyd
+Alexander Larsson
+Alexander Midlash
+Alexander Morozov
+Alexander Polakov
+Alexander Shopov
+Alexandre Beslic
+Alexandre Garnier
+Alexandre González
+Alexandre Jomin
+Alexandru Sfirlogea
+Alexei Margasov
+Alexey Guskov
+Alexey Kotlyarov
+Alexey Shamrin
+Alexis Ries
+Alexis Thomas
+Alfred Landrum
+Ali Dehghani
+Alicia Lauerman
+Alihan Demir
+Allen Madsen
+Allen Sun
+almoehi
+Alvaro Saurin
+Alvin Deng
+Alvin Richards
+amangoel
+Amen Belayneh
+Ameya Gawde
+Amir Goldstein
+AmirBuddy
+Amit Bakshi
+Amit Krishnan
+Amit Shukla
+Amr Gawish
+Amy Lindburg
+Anand Patil
+AnandkumarPatel
+Anatoly Borodin
+Anca Iordache
+Anchal Agrawal
+Anda Xu
+Anders Janmyr
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky
+Andrea Denisse Gómez
+Andrea Luzzardi
+Andrea Turli
+Andreas Elvers
+Andreas Köhler
+Andreas Savvides
+Andreas Tiefenthaler
+Andrei Gherzan
+Andrei Ushakov
+Andrei Vagin
+Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me>
+Andrew C. Bodine
+Andrew Clay Shafer
+Andrew Duckworth
+Andrew France
+Andrew Gerrand
+Andrew Guenther
+Andrew He
+Andrew Hsu
+Andrew Kim
+Andrew Kuklewicz
+Andrew Macgregor
+Andrew Macpherson
+Andrew Martin
+Andrew McDonnell
+Andrew Munsell
+Andrew Pennebaker
+Andrew Po
+Andrew Weiss
+Andrew Williams
+Andrews Medina
+Andrey Kolomentsev
+Andrey Petrov
+Andrey Stolbovsky
+André Martins
+Andrés Maldonado
+Andy Chambers
+andy diller
+Andy Goldstein
+Andy Kipp
+Andy Lindeman
+Andy Rothfusz
+Andy Smith
+Andy Wilson
+Andy Zhang
+Aneesh Kulkarni
+Anes Hasicic
+Angel Velazquez
+Anil Belur
+Anil Madhavapeddy
+Ankit Jain
+Ankush Agarwal
+Anonmily
+Anran Qiao
+Anshul Pundir
+Anthon van der Neut
+Anthony Baire
+Anthony Bishopric
+Anthony Dahanne
+Anthony Sottile
+Anton Löfgren
+Anton Nikitin
+Anton Polonskiy
+Anton Tiurin
+Antonio Aguilar
+Antonio Murdaca
+Antonis Kalipetis
+Antony Messerli