diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a9bf497 --- /dev/null +++ b/.gitignore @@ -0,0 +1,9 @@ +.idea/ +.vscode/ +tests/ +.DS_Store +.env +.env.local +.env.development.local +.env.test.local +.env.production.local \ No newline at end of file diff --git a/nomad/origin/orchestrator.hcl b/nomad/origin/orchestrator.hcl index 1e7ee1e..5695a16 100644 --- a/nomad/origin/orchestrator.hcl +++ b/nomad/origin/orchestrator.hcl @@ -24,7 +24,10 @@ job "orchestrator" { port = "orchestrator" } } - + service { + name = "orchestrator-proxy" + port = "${proxy_port}" + } task "start" { driver = "raw_exec" @@ -41,6 +44,11 @@ job "orchestrator" { TEMPLATE_AWS_BUCKET_NAME = "${BUCKET_FC_TEMPLATE}" AWS_REGION = "${AWSREGION}" USE_FIRECRACKER_NATIVE_DIFF = true + ORCHESTRATOR_SERVICES = "orchestrator,template-manager" + ARTIFACTS_REGISTRY_PROVIDER = "AWS_ECR" + AWS_DOCKER_REPOSITORY_NAME = "e2bdev/base" + + } config { diff --git a/packages/api/go.mod b/packages/api/go.mod index a6210d7..5a859d7 100644 --- a/packages/api/go.mod +++ b/packages/api/go.mod @@ -6,10 +6,10 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/dchest/uniuri v1.2.0 // indirect github.com/e2b-dev/infra/packages/shared v0.0.0 - github.com/getkin/kin-openapi v0.131.0 + github.com/getkin/kin-openapi v0.132.0 github.com/gin-contrib/cors v1.6.0 github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 - github.com/gin-gonic/gin v1.10.0 + github.com/gin-gonic/gin v1.10.1 github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/gogo/status v1.1.1 // https://github.com/grafana/loki/issues/2826. 
This is the equivalent of the main branch at 2023/11/27 (d62d4e37d1f3dba83cf10a1f6db82830794e1c05) @@ -21,9 +21,9 @@ require ( github.com/oapi-codegen/runtime v1.1.1 github.com/orcaman/concurrent-map/v2 v2.0.1 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.57.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 - go.opentelemetry.io/otel v1.35.0 - go.opentelemetry.io/otel/trace v1.35.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 + go.opentelemetry.io/otel v1.36.0 + go.opentelemetry.io/otel/trace v1.36.0 go.uber.org/zap v1.27.0 ) @@ -39,12 +39,12 @@ require ( github.com/redis/go-redis/v9 v9.7.3 github.com/stretchr/testify v1.10.0 golang.org/x/mod v0.24.0 - google.golang.org/grpc v1.71.0 + google.golang.org/grpc v1.72.1 ) require ( ariga.io/atlas v0.15.0 // indirect - connectrpc.com/connect v1.16.2 // indirect + connectrpc.com/connect v1.18.1 // indirect entgo.io/ent v0.12.5 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect @@ -66,13 +66,13 @@ require ( github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect - github.com/bytedance/sonic v1.12.4 // indirect - github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/bytedance/sonic v1.13.2 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -85,7 +85,7 @@ 
require ( github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect github.com/go-kit/log v0.2.1 // indirect @@ -109,8 +109,8 @@ require ( github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect - github.com/hashicorp/consul/api latest // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/hashicorp/consul/api v1.32.1 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect @@ -131,7 +131,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -189,42 +189,42 @@ require ( go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 // indirect go.opentelemetry.io/collector/semconv v0.81.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.9.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect go.opentelemetry.io/otel/log v0.10.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.6.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect - golang.org/x/arch v0.11.0 // indirect - golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/arch v0.16.0 // indirect + golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/time v0.11.0 // indirect - golang.org/x/tools v0.32.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect + golang.org/x/tools v0.33.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect - github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gin-contrib/sse v1.0.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect 
github.com/go-openapi/swag v0.23.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.22.1 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/go-playground/validator/v10 v10.26.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 // indirect @@ -237,19 +237,19 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/posthog/posthog-go v0.0.0-20230801140217-d607812dee69 github.com/ugorji/go/codec v1.2.12 // indirect - go.opentelemetry.io/otel/metric v1.35.0 - golang.org/x/crypto v0.37.0 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/sync v0.13.0 - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/sync v0.14.0 + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -274,4 +274,3 @@ replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0. 
// https://github.com/grafana/loki/issues/2826 replace github.com/grafana/loki/pkg/push => github.com/grafana/loki/pkg/push v0.0.0-20231124145642-d62d4e37d1f3 - diff --git a/packages/api/go.sum b/packages/api/go.sum index c56d7d6..d7ca6c8 100644 --- a/packages/api/go.sum +++ b/packages/api/go.sum @@ -36,6 +36,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= @@ -67,6 +69,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go/v2 v2.33.1/go.mod h1:cb1Ss8Sz8PZNdfvEBwkMAdRhoyB6/HiB6o3We5ZIcE4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod 
h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -84,6 +88,7 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= @@ -104,6 +109,7 @@ github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGn github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= @@ -121,6 +127,7 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.321 h1:iXwFLxWjZPjYqjPq0EcCs46xX7oDLEELte1+BzgpKk8= github.com/aws/aws-sdk-go v1.44.321/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.55.7/go.mod 
h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= @@ -172,9 +179,11 @@ github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMU github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytedance/sonic v1.12.4 h1:9Csb3c9ZJhfUWeMtpCDCq6BUoH5ogfDFLUgQ/jG+R0k= github.com/bytedance/sonic v1.12.4/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -184,6 +193,7 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -194,6 +204,7 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -250,8 +261,10 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowchartsman/retry v1.2.0/go.mod h1:+sfx8OgCCiAr3t5jh2Gk+T0fRTI+k52edaYxURQxY64= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= @@ -259,10 +272,14 @@ 
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/getkin/kin-openapi v0.122.0 h1:WB9Jbl0Hp/T79/JF9xlSW5Kl9uYdk/AWD0yAd9HOM10= github.com/getkin/kin-openapi v0.122.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= +github.com/getkin/kin-openapi v0.131.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= +github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/cors v1.6.0 h1:0Z7D/bVhE6ja07lI8CTjTonp6SB07o8bNuFyRbsBUQg= github.com/gin-contrib/cors v1.6.0/go.mod h1:cI+h6iOAyxKRtUtC6iF/Si1KSFvGm/gK+kshxlCi8ro= @@ -270,9 +287,14 @@ github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4 h1:Z9J0PVIt1PuibO github.com/gin-contrib/size v0.0.0-20230212012657-e14a14094dc4/go.mod h1:CEPcgZiz8998l9E8fDm16h8UfHRL7b+5oG0j/0koeVw= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= +github.com/gin-contrib/zap v1.1.4/go.mod h1:7lgEpe91kLbeJkwBTPgtVBy4zMa6oSBEcvj662diqKQ= github.com/gin-gonic/gin v1.8.1/go.mod 
h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -287,6 +309,7 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -294,11 +317,13 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= +github.com/go-openapi/analysis v0.23.0/go.mod 
h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -306,30 +331,37 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 
h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= github.com/go-openapi/validate v0.22.1/go.mod 
h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= @@ -342,8 +374,10 @@ github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91 github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-redis/cache/v8 v8.4.4 h1:Rm0wZ55X22BA2JMqVtRQNHYyzDd0I5f+Ec/C9Xx3mXY= github.com/go-redis/cache/v8 v8.4.4/go.mod h1:JM6CkupsPvAu/LYEVGQy6UB4WDAzQSXkR0lUCbeIcKc= +github.com/go-redis/cache/v9 v9.0.0/go.mod h1:cMwi1N8ASBOufbIvk7cdXe2PbPjK/WMRL95FFHWsSgI= github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= @@ -382,6 +416,7 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= @@ -396,6 +431,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -433,6 +470,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -446,8 +484,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp 
v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -462,6 +503,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -488,14 +530,18 @@ github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= 
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM= github.com/hashicorp/consul/api v1.26.1/go.mod h1:B4sQTeaSO16NtynqrAdwOlahJ7IUDZM9cj2420xYL8A= +github.com/hashicorp/consul/api v1.32.1/go.mod h1:mXUWLnxftwTmDv4W3lzxYCPD199iNLLUyLfLGFJbtl4= github.com/hashicorp/consul/sdk v0.15.0 h1:2qK9nDrr4tiJKRoxPGhm6B7xJjLVIQqkjiab2M4aKjU= github.com/hashicorp/consul/sdk v0.15.0/go.mod h1:r/OmRRPbHOe0yxNahLw7G9x5WG17E1BIECMtCjcPSNo= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -508,6 +554,7 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.2 
h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -537,6 +584,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -554,6 +602,7 @@ github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo 
v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -562,8 +611,13 @@ github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/ionos-cloud/sdk-go/v6 v6.1.8 h1:493wE/BkZxJf7x79UCE0cYGPZoqQcPiEBALvt7uVGY0= github.com/ionos-cloud/sdk-go/v6 v6.1.8/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jellydator/ttlcache/v3 v3.3.1-0.20250207140243-aefc35918359/go.mod h1:aqa3CYl8S7MwpMXtFH3uNIEEfOjcn1MUNO+bQIGbFAQ= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -594,9 +648,11 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= @@ -648,10 +704,12 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -669,6 +727,7 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure 
v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -695,6 +754,9 @@ github.com/oapi-codegen/gin-middleware v1.0.1 h1:903hkcyMcM/h6ooHS7t/2ad973BY0xv github.com/oapi-codegen/gin-middleware v1.0.1/go.mod h1:JDMxGX/rErQs2VV0XAVo1sD6sA0EVUMvFSPhgOLt9mE= github.com/oapi-codegen/runtime v1.1.0 h1:rJpoNUawn5XTvekgfkvSZr0RqEnoYpFkyvrzfWeFKWM= github.com/oapi-codegen/runtime v1.1.0/go.mod h1:BeSfBkWWWnAnGdyS+S/GnlbmHKzf8/hwkvelJZDeKA8= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -702,17 +764,34 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod 
h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing-contrib/go-stdlib v1.1.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.1.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -725,15 +804,19 @@ github.com/ovh/go-ovh v1.4.1/go.mod h1:6bL6pPyUT7tBfI0pqOegJgRjgjuO+mOo+MyXd1EEC github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= 
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -750,6 +833,7 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/posthog/posthog-go v0.0.0-20230801140217-d607812dee69 h1:01dHVodha5BzrMtVmcpPeA4VYbZEsTXQ6m4123zQXJk= github.com/posthog/posthog-go v0.0.0-20230801140217-d607812dee69/go.mod h1:migYMxlAqcnQy+3eN8mcL0b2tpKy6R+8Zc0lxwk4dKM= +github.com/pressly/goose/v3 v3.24.2/go.mod h1:kjefwFB0eR4w30Td2Gj2Mznyw94vSP+2jJYkOVNbD1k= github.com/prometheus/alertmanager v0.26.0 h1:uOMJWfIwJguc3NaM3appWNbbrh6G/OjvaHMk22aBBYc= github.com/prometheus/alertmanager v0.26.0/go.mod h1:rVcnARltVjavgVaNnmevxK7kOn7IZavyf0KNgHkbEpU= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -760,12 +844,14 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= 
+github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -773,6 +859,7 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.10.1-0.20230714054209-2f4150c63f97 h1:oHcfzdJnM/SFppy2aUlvomk37GI33x9vgJULihE5Dt8= @@ -784,8 +871,11 @@ github.com/prometheus/procfs v0.1.3/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510 h1:6ksZ7t1hNOzGPPs8DK7SvXQf6UfWzi+W5Z7PCBl8gx4= github.com/prometheus/prometheus v0.47.2-0.20231010075449-4b9c19fe5510/go.mod h1:UC0TwJiF90m2T3iYPQBKnGu8gv3s55dF/EgpTq8gyvo= +github.com/redis/go-redis/v9 v9.0.0-rc.4/go.mod h1:Vo3EsyWnicKnSKCA7HhgnvnyA74wOA69Cd2Meli5mmA= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -804,17 +894,20 @@ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= 
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -852,6 +945,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -889,12 +983,14 @@ github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3k github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= 
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= @@ -902,6 +998,7 @@ github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= @@ -911,43 +1008,70 @@ go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/ go.mongodb.org/mongo-driver v1.7.3/go.mod 
h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 h1:8PzrQFk3oKiT1Sd5EmNEcagdMyt1KcBy5/OyF5He5gY= go.opentelemetry.io/collector/pdata v1.0.0-rcv0015/go.mod h1:I1PqyHJlsXjANC73tp43nDId7/jiv82NoZZ6uS0xdwM= go.opentelemetry.io/collector/semconv v0.81.0 h1:lCYNNo3powDvFIaTPP2jDKIrBiV1T92NK4QgL/aHYXw= go.opentelemetry.io/collector/semconv v0.81.0/go.mod h1:TlYPtzvsXyHOgr5eATi43qEMqwSmIziivJB2uctKswo= +go.opentelemetry.io/contrib/bridges/otelzap v0.9.0/go.mod h1:T1Z1jyS5FttgQoF6UcGhnM+gF9wU32B4lHO69nXw4FE= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.57.0 h1:1wEousrQOXTAhk16quIMIo1gSaUp1J3PEVlsiEAtmeU= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.57.0/go.mod h1:rUWyQu4HfRAG0jkr1TixDHP9IERQ/iEq/YwFoU73ddo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/contrib/propagators/b3 v1.32.0 h1:MazJBz2Zf6HTN/nK/s3Ru1qme+VhWU5hm83QxEP+dvw= go.opentelemetry.io/contrib/propagators/b3 v1.32.0/go.mod h1:B0s70QHYPrJwPOwD1o3V/R8vETNOG9N3qZf4LDYvA30= go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM= go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo= go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod 
h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -961,10 +1085,12 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= golang.org/x/arch v0.11.0 h1:KXV8WWKCXm6tRpLirl2szsO5j/oOODwZf4hATmGVNs4= golang.org/x/arch v0.11.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -977,6 +1103,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= @@ -995,6 +1122,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1018,9 +1147,13 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1062,12 +1195,19 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1076,6 +1216,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1089,6 +1231,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1150,7 +1293,11 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1158,6 +1305,8 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= @@ -1166,6 +1315,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1178,6 +1329,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= @@ -1186,6 +1339,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1241,9 +1395,14 @@ golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.4.0/go.mod 
h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1309,8 +1468,12 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod 
h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1329,6 +1492,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1341,9 +1506,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6/go.mod 
h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1407,3 +1574,4 @@ sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77Vzej sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/packages/client-proxy/go.mod b/packages/client-proxy/go.mod index 57f29d1..ac4b779 100644 --- a/packages/client-proxy/go.mod +++ b/packages/client-proxy/go.mod @@ -1,28 +1,28 @@ module github.com/e2b-dev/infra/packages/proxy -go 1.24 +go 1.24.3 require ( github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/miekg/dns v1.1.63 - go.uber.org/zap v1.18.1 + go.uber.org/zap v1.27.0 ) require ( - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/pkg/errors v0.9.1 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/mod v0.18.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/sync v0.14.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/tools v0.22.0 // indirect - 
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/grpc v1.72.1 // indirect + google.golang.org/protobuf v1.36.6 // indirect ) replace github.com/e2b-dev/infra/packages/shared v0.0.0 => ../shared diff --git a/packages/client-proxy/go.sum b/packages/client-proxy/go.sum index 29fe2c9..5b1e48d 100644 --- a/packages/client-proxy/go.sum +++ b/packages/client-proxy/go.sum @@ -26,6 +26,7 @@ github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaW github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -61,12 +62,15 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr 
v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -80,6 +84,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -90,6 +95,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -98,6 +105,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -106,10 +114,12 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -132,6 +142,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -139,10 +151,13 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/packages/db/Dockerfile b/packages/db/Dockerfile new file mode 100644 index 0000000..8b5a5bd --- /dev/null +++ b/packages/db/Dockerfile @@ -0,0 +1,31 @@ +# Builder stage +FROM golang:1.24-alpine AS builder + +# Shared +WORKDIR /build/shared + +COPY .shared/go.mod .shared/go.sum ./ +RUN go mod download + +COPY .shared/pkg pkg + +# +WORKDIR /build/db + +COPY go.mod go.sum ./ +RUN go mod download + +COPY scripts/migrator.go . + +RUN go build -o ./migrator ./migrator.go +RUN chmod +x ./migrator + +# Final stage +FROM alpine:latest + +COPY --from=builder /build/db/migrator /usr/local/bin/migrator + +WORKDIR /app +COPY /migrations ./migrations + +ENTRYPOINT ["migrator"] diff --git a/packages/db/Makefile b/packages/db/Makefile new file mode 100644 index 0000000..94b7603 --- /dev/null +++ b/packages/db/Makefile @@ -0,0 +1,43 @@ +ENV := $(shell cat ../../.last_used_env || echo "not-set") +-include ../../.env.${ENV} + +goose := GOOSE_DBSTRING=$(POSTGRES_CONNECTION_STRING) go tool goose -table "_migrations" -dir "migrations" postgres +IMAGE := e2b-orchestration/db-migrator + +.PHONY: migrate +migrate:migrate/up +migrate:migrate/down +migrate/%: + @echo "Applying Postgres migration *$(notdir $@)*" + @$(goose) $(notdir $@) + @echo "Done" + +.PHONY: build-debug +build-debug: + go mod download + go vet ./... 
+ +.PHONE: create-migration +create-migration: +ifeq ($(origin NAME), undefined) + @echo "The expected syntax is: make migration-create NAME=your-migration-name" + @exit 1 +endif + @$(goose) create $(NAME) sql + +.PHONE: status +status: + @$(goose) status + +.PHONY: generate +generate: + rm -rf queries/*.go + go tool sqlc generate + + +.PHONY: build-and-upload +build-and-upload: + @rm -rf .shared/ + @cp -r ../shared .shared/ + @docker build --platform linux/amd64 --tag "$(GCP_REGION)-docker.pkg.dev/$(GCP_PROJECT_ID)/$(IMAGE)" --push . + @rm -rf .shared/ diff --git a/packages/db/client/client.go b/packages/db/client/client.go new file mode 100644 index 0000000..1080075 --- /dev/null +++ b/packages/db/client/client.go @@ -0,0 +1,76 @@ +package client + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + _ "github.com/lib/pq" + "go.uber.org/zap" + + database "github.com/e2b-dev/infra/packages/db/queries" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +type Client struct { + *database.Queries + ctx context.Context + conn *pgxpool.Pool +} + +type Option func(config *pgxpool.Config) + +func WithMaxConnections(maxConns int32) Option { + return func(config *pgxpool.Config) { + config.MaxConns = maxConns + } +} + +func WithMinIdle(minIdle int32) Option { + return func(config *pgxpool.Config) { + config.MinIdleConns = minIdle + } +} + +func NewClient(ctx context.Context, options ...Option) (*Client, error) { + databaseURL := utils.RequiredEnv("POSTGRES_CONNECTION_STRING", "Postgres connection string") + + // Parse the connection pool configuration + config, err := pgxpool.ParseConfig(databaseURL) + if err != nil { + zap.L().Error("Unable to parse database URL", zap.Error(err)) + + return nil, err + } + + // Set the default number of connections + for _, option := range options { + option(config) + } + + // Create the connection pool + pool, err := pgxpool.NewWithConfig(ctx, config) + if err != nil { + zap.L().Error("Unable 
to create connection pool", zap.Error(err)) + } + + queries := database.New(pool) + + return &Client{Queries: queries, ctx: ctx, conn: pool}, nil +} + +func (db *Client) Close() error { + db.conn.Close() + return nil +} + +// WithTx runs the given function in a transaction. +func (db *Client) WithTx(ctx context.Context) (*Client, pgx.Tx, error) { + tx, err := db.conn.BeginTx(ctx, pgx.TxOptions{}) + if err != nil { + return nil, nil, err + } + + client := &Client{Queries: db.Queries.WithTx(tx), conn: db.conn, ctx: db.ctx} + return client, tx, nil +} diff --git a/packages/db/go.mod b/packages/db/go.mod new file mode 100644 index 0000000..507c51c --- /dev/null +++ b/packages/db/go.mod @@ -0,0 +1,108 @@ +module github.com/e2b-dev/infra/packages/db + +go 1.24.3 + +require ( + github.com/e2b-dev/infra/packages/shared v0.0.0-20250324174051-3fb806938dc1 + github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.7.4 + github.com/lib/pq v1.10.9 + github.com/pressly/goose/v3 v3.24.2 + go.uber.org/zap v1.27.0 +) + +require ( + cel.dev/expr v0.24.0 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect + github.com/ClickHouse/ch-go v0.65.1 // indirect + github.com/ClickHouse/clickhouse-go/v2 v2.33.1 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect + github.com/coder/websocket v1.8.13 // indirect + github.com/cubicdaiya/gonp v1.0.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/elastic/go-sysinfo v1.15.2 // indirect + github.com/elastic/go-windows v1.0.2 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/go-faster/city v1.0.1 // indirect + github.com/go-faster/errors v0.7.1 // indirect + github.com/go-sql-driver/mysql v1.9.2 // indirect + 
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect + github.com/golang-sql/sqlexp v0.1.0 // indirect + github.com/google/cel-go v0.25.0 // indirect + github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/jonboulle/clockwork v0.5.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/mfridman/xflag v0.1.0 // indirect + github.com/microsoft/go-mssqldb v1.8.0 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/paulmach/orb v0.11.1 // indirect + github.com/pganalyze/pg_query_go/v6 v6.1.0 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pingcap/errors v0.11.5-0.20240311024730-e056997136bb // indirect + github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 // indirect + github.com/pingcap/log v1.1.0 // indirect + github.com/pingcap/tidb/pkg/parser v0.0.0-20250324122243-d51e00e5bbf0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/procfs v0.16.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/riza-io/grpc-go v0.2.0 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/sqlc-dev/sqlc v1.29.0 // indirect + github.com/stoewer/go-strcase 
v1.3.0 // indirect + github.com/tetratelabs/wazero v1.9.0 // indirect + github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d // indirect + github.com/vertica/vertica-sql-go v1.3.3 // indirect + github.com/wasilibs/go-pgquery v0.0.0-20250409022910-10ac41983c07 // indirect + github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect + github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 // indirect + github.com/ydb-platform/ydb-go-sdk/v3 v3.104.7 // indirect + github.com/ziutek/mymysql v1.5.4 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/mock v0.5.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/sync v0.14.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/grpc v1.72.1 // indirect + google.golang.org/protobuf v1.36.6 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + howett.net/plist v1.0.1 // indirect + modernc.org/libc v1.62.1 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.9.1 // indirect + modernc.org/sqlite v1.37.0 // indirect +) + +replace github.com/e2b-dev/infra/packages/shared => ../shared + +tool ( + github.com/pressly/goose/v3/cmd/goose + github.com/sqlc-dev/sqlc/cmd/sqlc +) diff --git a/packages/db/go.sum b/packages/db/go.sum new file mode 100644 index 0000000..e7d00a8 --- /dev/null +++ b/packages/db/go.sum @@ -0,0 +1,447 @@ +cel.dev/expr 
v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR8qd/Jw1Le0NZebGBUCLbtak3bJ3z1OlqZBpw= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ClickHouse/ch-go v0.65.1 
h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= +github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go/v2 v2.33.1 h1:Z5nO/AnmUywcw0AvhAD0M1C2EaMspnXRK9vEOLxgmI0= +github.com/ClickHouse/clickhouse-go/v2 v2.33.1/go.mod h1:cb1Ss8Sz8PZNdfvEBwkMAdRhoyB6/HiB6o3We5ZIcE4= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= +github.com/coder/websocket v1.8.13/go.mod 
h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cubicdaiya/gonp v1.0.4 h1:ky2uIAJh81WiLcGKBVD5R7KsM/36W6IqqTy6Bo6rGws= +github.com/cubicdaiya/gonp v1.0.4/go.mod h1:iWGuP/7+JVTn02OWhRemVbMmG1DOUnmrGTYYACpOI0I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM= +github.com/elastic/go-sysinfo v1.15.2 h1:rgUFj4xRnxdAaxh4IhuGzHINWT8WrwUe5D338LLRC0s= +github.com/elastic/go-sysinfo v1.15.2/go.mod h1:jPSuTgXG+dhhh0GKIyI2Cso+w5lPJ5PvVqKlL8LV/Hk= +github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.2 h1:yoLLsAsV5cfg9FLhZ9EXZ2n2sQFKeDYrHenkcivY4vI= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil 
v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/google/cel-go v0.25.0 h1:jsFw9Fhn+3y2kBbltZR4VEz5xKkcIFRPDnuEzAGv5GY= +github.com/google/cel-go v0.25.0/go.mod h1:hjEb6r5SuOSlhCHmFoLzu8HGCERvIsDAbxDAyNU/MmI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= 
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= +github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/mfridman/xflag v0.1.0 h1:TWZrZwG1QklFX5S4j1vxfF1sZbZeZSGofMwPMLAF29M= +github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxWWnjRaE= +github.com/microsoft/go-mssqldb v1.8.0 h1:7cyZ/AT7ycDsEoWPIXibd+aVKFtteUNhDGf3aobP+tw= +github.com/microsoft/go-mssqldb v1.8.0/go.mod h1:6znkekS3T2vp0waiMhen4GPU1BiAsrP+iXHcE7a7rFo= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= 
+github.com/pganalyze/pg_query_go/v6 v6.1.0 h1:jG5ZLhcVgL1FAw4C/0VNQaVmX1SUJx71wBGdtTtBvls= +github.com/pganalyze/pg_query_go/v6 v6.1.0/go.mod h1:nvTHIuoud6e1SfrUaFwHqT0i4b5Nr+1rPWVds3B5+50= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20240311024730-e056997136bb h1:3pSi4EDG6hg0orE1ndHkXvX6Qdq2cZn8gAPir8ymKZk= +github.com/pingcap/errors v0.11.5-0.20240311024730-e056997136bb/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= +github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8= +github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= +github.com/pingcap/tidb/pkg/parser v0.0.0-20250324122243-d51e00e5bbf0 h1:W3rpAI3bubR6VWOcwxDIG0Gz9G5rl5b3SL116T0vBt0= +github.com/pingcap/tidb/pkg/parser v0.0.0-20250324122243-d51e00e5bbf0/go.mod h1:+8feuexTKcXHZF/dkDfvCwEyBAmgb4paFc3/WeYV2eE= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pressly/goose/v3 v3.24.2 h1:c/ie0Gm8rnIVKvnDQ/scHErv46jrDv9b4I0WRcFJzYU= +github.com/pressly/goose/v3 v3.24.2/go.mod h1:kjefwFB0eR4w30Td2Gj2Mznyw94vSP+2jJYkOVNbD1k= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM= +github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg= +github.com/rekby/fixenv v0.6.1 h1:jUFiSPpajT4WY2cYuc++7Y1zWrnCxnovGCIX72PZniM= +github.com/rekby/fixenv v0.6.1/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/riza-io/grpc-go v0.2.0 h1:2HxQKFVE7VuYstcJ8zqpN84VnAoJ4dCL6YFhJewNcHQ= +github.com/riza-io/grpc-go v0.2.0/go.mod h1:2bDvR9KkKC3KhtlSHfR3dAXjUMT86kg4UfWFyVGWqi8= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/segmentio/asm v1.2.0 
h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/sqlc-dev/sqlc v1.29.0 h1:HQctoD7y/i29Bao53qXO7CZ/BV9NcvpGpsJWvz9nKWs= +github.com/sqlc-dev/sqlc v1.29.0/go.mod h1:BavmYw11px5AdPOjAVHmb9fctP5A8GTziC38wBF9tp0= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d h1:dOMI4+zEbDI37KGb0TI44GUAwxHF9cMsIoDTJ7UmgfU= +github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d/go.mod h1:l8xTsYB90uaVdMHXMCxKKLSgw5wLYBwBKKefNIUnm9s= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/wasilibs/go-pgquery v0.0.0-20250409022910-10ac41983c07 h1:mJdDDPblDfPe7z7go8Dvv1AJQDI3eQ/5xith3q2mFlo= +github.com/wasilibs/go-pgquery v0.0.0-20250409022910-10ac41983c07/go.mod h1:Ak17IJ037caFp4jpCw/iQQ7/W74Sqpb1YuKJU6HTKfM= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 h1:OvLBa8SqJnZ6P+mjlzc2K7PM22rRUPE1x32G9DTPrC4= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52/go.mod h1:jMeV4Vpbi8osrE/pKUxRZkVaA0EX7NZN0A9/oRzgpgY= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod 
h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77 h1:LY6cI8cP4B9rrpTleZk95+08kl2gF4rixG7+V/dwL6Q= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20241112172322-ea1f63298f77/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.104.7 h1:d05IBvxm7X+5xo6tdZ/vHdgJF6MV+cFBEtsAGo19CjE= +github.com/ydb-platform/ydb-go-sdk/v3 v3.104.7/go.mod h1:l5sSv153E18VvYcsmr51hok9Sjc16tEC8AXGbwrk+ho= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace 
v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= +go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto 
v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic= +modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU= +modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= +modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s= +modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod 
h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.9.1 h1:V/Z1solwAVmMW1yttq3nDdZPJqV1rM05Ccq6KMSZ34g= +modernc.org/memory v1.9.1/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI= +modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/packages/shared/migrations/20000101000000.sql b/packages/db/migrations/20000101000000_auth.sql similarity index 54% rename from packages/shared/migrations/20000101000000.sql rename to packages/db/migrations/20000101000000_auth.sql index 53fe797..f49f968 100644 --- a/packages/shared/migrations/20000101000000.sql +++ b/packages/db/migrations/20000101000000_auth.sql @@ -1,3 +1,5 @@ +-- +goose Up +-- +goose StatementBegin CREATE SCHEMA IF NOT EXISTS auth; -- Create RLS policies for user management @@ -34,10 +36,30 @@ DO $$ $$; --- Create "users" table -CREATE TABLE IF NOT EXISTS "auth"."users" -( - "id" uuid NOT NULL DEFAULT gen_random_uuid(), - "email" text NOT NULL, - PRIMARY KEY ("id") -); +-- Grant execute on auth.uid() to postgres role +GRANT EXECUTE ON FUNCTION auth.uid() TO postgres; + +-- Check if the table exists before trying to create it +DO $$ + BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'auth' + AND table_name = 'users' + ) 
THEN + EXECUTE ' + CREATE TABLE auth.users ( + id uuid NOT NULL DEFAULT gen_random_uuid(), + email text NOT NULL, + PRIMARY KEY (id) + );'; + END IF; + END; +$$; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20000101000001_rls_for_migration_table.sql b/packages/db/migrations/20000101000001_rls_for_migration_table.sql new file mode 100644 index 0000000..ad6f5a8 --- /dev/null +++ b/packages/db/migrations/20000101000001_rls_for_migration_table.sql @@ -0,0 +1,8 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE "public"."_migrations" ENABLE ROW LEVEL SECURITY; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20231124185944.sql b/packages/db/migrations/20231124185944_create_schemas_and_tables.sql similarity index 90% rename from packages/shared/migrations/20231124185944.sql rename to packages/db/migrations/20231124185944_create_schemas_and_tables.sql index bacdf2b..94af814 100644 --- a/packages/shared/migrations/20231124185944.sql +++ b/packages/db/migrations/20231124185944_create_schemas_and_tables.sql @@ -1,8 +1,11 @@ +-- +goose Up +-- +goose StatementBegin + -- Add new schema named "auth" -CREATE SCHEMA IF NOT EXISTS "auth"; CREATE SCHEMA IF NOT EXISTS "extensions"; + -- Create "tiers" table -CREATE TABLE "public"."tiers" +CREATE TABLE IF NOT EXISTS "public"."tiers" ( "id" text NOT NULL, "name" text NOT NULL, @@ -23,7 +26,7 @@ COMMENT ON COLUMN public.tiers.concurrent_instances IS 'The number of instances the team can run concurrently'; -- Create "teams" table -CREATE TABLE "public"."teams" +CREATE TABLE IF NOT EXISTS "public"."teams" ( "id" uuid DEFAULT gen_random_uuid(), "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -37,7 +40,7 @@ CREATE TABLE "public"."teams" ALTER TABLE "public"."teams" ENABLE ROW LEVEL SECURITY; -- Create "envs" table -CREATE TABLE "public"."envs" +CREATE 
TABLE IF NOT EXISTS "public"."envs" ( "id" text NOT NULL, "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -60,7 +63,7 @@ COMMENT ON COLUMN public.envs.spawn_count IS 'Number of times the env was spawned'; -- Create "env_aliases" table -CREATE TABLE "public"."env_aliases" +CREATE TABLE IF NOT EXISTS "public"."env_aliases" ( "alias" text NOT NULL, "is_name" boolean NOT NULL DEFAULT true, @@ -71,7 +74,7 @@ CREATE TABLE "public"."env_aliases" ALTER TABLE "public"."env_aliases" ENABLE ROW LEVEL SECURITY; -- Create "team_api_keys" table -CREATE TABLE "public"."team_api_keys" +CREATE TABLE IF NOT EXISTS "public"."team_api_keys" ( "api_key" character varying(44) NOT NULL, "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, @@ -81,16 +84,8 @@ CREATE TABLE "public"."team_api_keys" ); ALTER TABLE "public"."team_api_keys" ENABLE ROW LEVEL SECURITY; --- Create "users" table -CREATE TABLE IF NOT EXISTS "auth"."users" -( - "id" uuid NOT NULL DEFAULT gen_random_uuid(), - "email" character varying(255) NOT NULL, - PRIMARY KEY ("id") -); - -- Create "access_tokens" table -CREATE TABLE "public"."access_tokens" +CREATE TABLE IF NOT EXISTS "public"."access_tokens" ( "access_token" text NOT NULL, "user_id" uuid NOT NULL, @@ -101,7 +96,7 @@ CREATE TABLE "public"."access_tokens" ALTER TABLE "public"."access_tokens" ENABLE ROW LEVEL SECURITY; -- Create "users_teams" table -CREATE TABLE "public"."users_teams" +CREATE TABLE IF NOT EXISTS "public"."users_teams" ( "id" bigint NOT NULL GENERATED BY DEFAULT AS IDENTITY, "user_id" uuid NOT NULL, @@ -154,4 +149,10 @@ BEGIN END $$; -- Create index "usersteams_team_id_user_id" to table: "users_teams" -CREATE UNIQUE INDEX "usersteams_team_id_user_id" ON "public"."users_teams" ("team_id", "user_id"); +CREATE UNIQUE INDEX IF NOT EXISTS "usersteams_team_id_user_id" ON "public"."users_teams" ("team_id", "user_id"); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git 
a/packages/shared/migrations/20231220094836.sql b/packages/db/migrations/20231220094836_create_triggers_and_policies.sql similarity index 96% rename from packages/shared/migrations/20231220094836.sql rename to packages/db/migrations/20231220094836_create_triggers_and_policies.sql index edfe32c..0d4501a 100644 --- a/packages/shared/migrations/20231220094836.sql +++ b/packages/db/migrations/20231220094836_create_triggers_and_policies.sql @@ -1,3 +1,6 @@ +-- +goose Up +-- +goose StatementBegin + -- Add base tier INSERT INTO public.tiers (id, name, vcpu, ram_mb, disk_mb, concurrent_instances) VALUES ('base_v1', 'Base tier', 2, 512, 512, 20); @@ -117,3 +120,9 @@ CREATE POLICY "Allow to create a team api key to new user" FOR INSERT TO trigger_user WITH CHECK (TRUE); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20231222181015_add_env_resources.sql b/packages/db/migrations/20231222181015_add_env_resources.sql new file mode 100644 index 0000000..5003ae0 --- /dev/null +++ b/packages/db/migrations/20231222181015_add_env_resources.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "envs" table +ALTER TABLE "public"."envs" + ADD COLUMN IF NOT EXISTS "vcpu" bigint NULL, + ADD COLUMN IF NOT EXISTS "ram_mb" bigint NULL, + ADD COLUMN IF NOT EXISTS "free_disk_size_mb" bigint NULL, + ADD COLUMN IF NOT EXISTS "total_disk_size_mb" bigint NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20240103104619.sql b/packages/db/migrations/20240103104619_add_team_email.sql similarity index 61% rename from packages/shared/migrations/20240103104619.sql rename to packages/db/migrations/20240103104619_add_team_email.sql index f6cfd08..b42d53e 100644 --- a/packages/shared/migrations/20240103104619.sql +++ b/packages/db/migrations/20240103104619_add_team_email.sql @@ -1,5 +1,8 @@ +-- +goose Up +-- +goose 
StatementBegin + -- Modify "teams" table -ALTER TABLE "public"."teams" ADD COLUMN "email" character varying(255) NULL; +ALTER TABLE "public"."teams" ADD COLUMN IF NOT EXISTS "email" character varying(255) NULL; CREATE OR REPLACE FUNCTION public.generate_default_team() RETURNS TRIGGER @@ -15,6 +18,12 @@ BEGIN END $create_default_team$ SECURITY DEFINER SET search_path = public; -UPDATE "public"."teams" SET "email" = "name"; +UPDATE "public"."teams" SET "email" = "name" WHERE "email" IS NULL; + +ALTER TABLE "public"."teams" ALTER COLUMN "email" SET NOT NULL; + +-- +goose StatementEnd -ALTER TABLE "public"."teams" ALTER COLUMN "email" SET NOT NULL; \ No newline at end of file +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240106121919_add_team_ban_fields.sql b/packages/db/migrations/20240106121919_add_team_ban_fields.sql new file mode 100644 index 0000000..3b2886c --- /dev/null +++ b/packages/db/migrations/20240106121919_add_team_ban_fields.sql @@ -0,0 +1,13 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "teams" table +ALTER TABLE "public"."teams" + ADD COLUMN IF NOT EXISTS "is_banned" boolean NOT NULL DEFAULT false, + ADD COLUMN IF NOT EXISTS "blocked_reason" text NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240202120312_add_kernel_version.sql b/packages/db/migrations/20240202120312_add_kernel_version.sql new file mode 100644 index 0000000..6a35e52 --- /dev/null +++ b/packages/db/migrations/20240202120312_add_kernel_version.sql @@ -0,0 +1,19 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "envs" table +ALTER TABLE "public"."envs" ADD COLUMN IF NOT EXISTS "kernel_version" character varying NULL; + +-- Update existing records +UPDATE "public"."envs" SET "kernel_version" = 'vmlinux-5.10.186-old' WHERE "kernel_version" IS NULL; + +-- Make kernel_version NOT NULL and set default +ALTER TABLE "public"."envs" + 
ALTER COLUMN "kernel_version" SET NOT NULL, + ALTER COLUMN "kernel_version" SET DEFAULT 'vmlinux-5.10.186'; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240219190940_add_max_length_hours.sql b/packages/db/migrations/20240219190940_add_max_length_hours.sql new file mode 100644 index 0000000..2926a38 --- /dev/null +++ b/packages/db/migrations/20240219190940_add_max_length_hours.sql @@ -0,0 +1,17 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "tiers" table +ALTER TABLE "public"."tiers" ADD COLUMN IF NOT EXISTS "max_length_hours" bigint NULL; + +-- Update existing records +UPDATE "public"."tiers" SET "max_length_hours" = 1 WHERE "max_length_hours" IS NULL; + +-- Make max_length_hours NOT NULL +ALTER TABLE "public"."tiers" ALTER COLUMN "max_length_hours" SET NOT NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240221023613_add_firecracker_version.sql b/packages/db/migrations/20240221023613_add_firecracker_version.sql new file mode 100644 index 0000000..41c7faa --- /dev/null +++ b/packages/db/migrations/20240221023613_add_firecracker_version.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "envs" table +ALTER TABLE "public"."envs" ADD COLUMN IF NOT EXISTS "firecracker_version" character varying NOT NULL DEFAULT 'v1.5.0_8a43b32e'; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240221215408_update_firecracker_version.sql b/packages/db/migrations/20240221215408_update_firecracker_version.sql new file mode 100644 index 0000000..f1b5062 --- /dev/null +++ b/packages/db/migrations/20240221215408_update_firecracker_version.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "envs" table +ALTER TABLE "public"."envs" +ALTER COLUMN "firecracker_version" SET
DEFAULT 'v1.7.0-dev_8bb88311'; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240305221944_remove_tier_resources.sql b/packages/db/migrations/20240305221944_remove_tier_resources.sql new file mode 100644 index 0000000..5978cf4 --- /dev/null +++ b/packages/db/migrations/20240305221944_remove_tier_resources.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "tiers" table +ALTER TABLE "public"."tiers" + DROP CONSTRAINT IF EXISTS "tiers_ram_mb_check", + DROP CONSTRAINT IF EXISTS "tiers_vcpu_check", + DROP COLUMN IF EXISTS "vcpu", + DROP COLUMN IF EXISTS "ram_mb"; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20240315165236.sql b/packages/db/migrations/20240315165236_create_env_builds.sql similarity index 65% rename from packages/shared/migrations/20240315165236.sql rename to packages/db/migrations/20240315165236_create_env_builds.sql index 30c9b52..0c28d92 100644 --- a/packages/shared/migrations/20240315165236.sql +++ b/packages/db/migrations/20240315165236_create_env_builds.sql @@ -1,6 +1,9 @@ +-- +goose Up +-- +goose StatementBegin + -- Modify "env_aliases" table -ALTER TABLE "public"."env_aliases" RENAME COLUMN "is_name" TO "is_renamable"; -ALTER TABLE "public"."env_aliases" ALTER COLUMN "env_id" SET NOT NULL; +ALTER TABLE IF EXISTS "public"."env_aliases" RENAME COLUMN "is_name" TO "is_renamable"; +ALTER TABLE IF EXISTS "public"."env_aliases" ALTER COLUMN "env_id" SET NOT NULL; -- Create "env_builds" table CREATE TABLE "public"."env_builds" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, "updated_at" timestamptz NOT NULL, "finished_at" timestamptz NULL, "status" text NOT NULL DEFAULT 'waiting', "dockerfile" text NULL, "start_cmd" text NULL, "vcpu" bigint NOT NULL, "ram_mb" bigint NOT NULL, "free_disk_size_mb" 
bigint NOT NULL, "total_disk_size_mb" bigint NULL, "kernel_version" text NOT NULL DEFAULT 'vmlinux-5.10.186', "firecracker_version" text NOT NULL DEFAULT 'v1.7.0-dev_8bb88311', "env_id" text NULL, PRIMARY KEY ("id"), CONSTRAINT "env_builds_envs_builds" FOREIGN KEY ("env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE); @@ -12,4 +15,18 @@ SELECT CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, 'success', dockerfile, NULL, vcpu, FROM "public"."envs"; -- Modify "envs" table -ALTER TABLE "public"."envs" DROP COLUMN "dockerfile", DROP COLUMN "build_id", DROP COLUMN "vcpu", DROP COLUMN "ram_mb", DROP COLUMN "free_disk_size_mb", DROP COLUMN "total_disk_size_mb", DROP COLUMN "kernel_version", DROP COLUMN "firecracker_version"; +ALTER TABLE IF EXISTS "public"."envs" + DROP COLUMN IF EXISTS "dockerfile", + DROP COLUMN IF EXISTS "build_id", + DROP COLUMN IF EXISTS "vcpu", + DROP COLUMN IF EXISTS "ram_mb", + DROP COLUMN IF EXISTS "free_disk_size_mb", + DROP COLUMN IF EXISTS "total_disk_size_mb", + DROP COLUMN IF EXISTS "kernel_version", + DROP COLUMN IF EXISTS "firecracker_version"; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20240605070918.sql b/packages/db/migrations/20240605070918_refactor_triggers_and_policies.sql similarity index 90% rename from packages/shared/migrations/20240605070918.sql rename to packages/db/migrations/20240605070918_refactor_triggers_and_policies.sql index 1694209..e858f71 100644 --- a/packages/shared/migrations/20240605070918.sql +++ b/packages/db/migrations/20240605070918_refactor_triggers_and_policies.sql @@ -1,9 +1,12 @@ -DROP TRIGGER create_default_team ON auth.users; -DROP FUNCTION generate_default_team_trigger(); -DROP TRIGGER team_api_keys_trigger ON public.teams; -DROP FUNCTION generate_teams_api_keys_trigger(); -DROP TRIGGER create_access_token ON auth.users; -DROP FUNCTION generate_access_token_trigger(); +-- +goose Up +-- +goose 
StatementBegin + +DROP TRIGGER IF EXISTS create_default_team ON auth.users; +DROP FUNCTION IF EXISTS generate_default_team_trigger(); +DROP TRIGGER IF EXISTS team_api_keys_trigger ON public.teams; +DROP FUNCTION IF EXISTS generate_teams_api_keys_trigger(); +DROP TRIGGER IF EXISTS create_access_token ON auth.users; +DROP FUNCTION IF EXISTS generate_access_token_trigger(); CREATE OR REPLACE FUNCTION public.extra_for_post_user_signup(user_id uuid, team_id uuid) RETURNS void @@ -75,12 +78,10 @@ $post_user_signup$ SECURITY DEFINER SET search_path = public; ALTER FUNCTION public.post_user_signup() OWNER TO trigger_user; - CREATE OR REPLACE TRIGGER post_user_signup AFTER INSERT ON auth.users FOR EACH ROW EXECUTE FUNCTION post_user_signup(); - CREATE OR REPLACE FUNCTION is_member_of_team(_user_id uuid, _team_id uuid) RETURNS bool AS $$ SELECT EXISTS ( SELECT 1 @@ -138,4 +139,9 @@ DO $$ END; END $$; -; \ No newline at end of file + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240625095352_add_envd_version.sql b/packages/db/migrations/20240625095352_add_envd_version.sql new file mode 100644 index 0000000..4825621 --- /dev/null +++ b/packages/db/migrations/20240625095352_add_envd_version.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "env_builds" table +ALTER TABLE "public"."env_builds" ADD COLUMN IF NOT EXISTS "envd_version" text NULL; + +-- Populate "envd_version" column if it was just added +UPDATE "public"."env_builds" SET "envd_version" = 'v0.0.1' +WHERE "envd_version" IS NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20240728094137.sql b/packages/db/migrations/20240728094137_add_default_team_flag.sql similarity index 62% rename from packages/shared/migrations/20240728094137.sql rename to packages/db/migrations/20240728094137_add_default_team_flag.sql index 
5e69b5b..8b07486 100644 --- a/packages/shared/migrations/20240728094137.sql +++ b/packages/db/migrations/20240728094137_add_default_team_flag.sql @@ -1,7 +1,17 @@ --- Modify "access_tokens" table -ALTER TABLE "public"."users_teams" ADD COLUMN "is_default" boolean NOT NULL DEFAULT false; -UPDATE "public"."users_teams" ut SET "is_default" = t."is_default" FROM "public"."teams" t WHERE ut."team_id" = t."id"; +-- +goose Up +-- +goose StatementBegin +-- Modify "users_teams" table +ALTER TABLE "public"."users_teams" ADD COLUMN IF NOT EXISTS "is_default" boolean NOT NULL DEFAULT false; + +-- Update existing records +UPDATE "public"."users_teams" ut +SET "is_default" = t."is_default" +FROM "public"."teams" t +WHERE ut."team_id" = t."id" +AND ut."is_default" = false; + +-- Create or replace function CREATE OR REPLACE FUNCTION public.post_user_signup() RETURNS TRIGGER LANGUAGE plpgsql @@ -27,4 +37,10 @@ BEGIN END $post_user_signup$ SECURITY DEFINER SET search_path = public; -ALTER FUNCTION public.post_user_signup() OWNER TO trigger_user; \ No newline at end of file +ALTER FUNCTION public.post_user_signup() OWNER TO trigger_user; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20240909142106_drop_team_policies.sql b/packages/db/migrations/20240909142106_drop_team_policies.sql new file mode 100644 index 0000000..3643354 --- /dev/null +++ b/packages/db/migrations/20240909142106_drop_team_policies.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose StatementBegin + +DROP POLICY IF EXISTS "Allow update for users that are in the team" ON "public"."teams"; +DROP POLICY IF EXISTS "Allow users to delete a team user entry" ON "public"."users_teams"; +DROP POLICY IF EXISTS "Allow users to create a new team user entry" ON "public"."users_teams"; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git 
a/packages/db/migrations/20241120222814_add_team_api_key_metadata.sql b/packages/db/migrations/20241120222814_add_team_api_key_metadata.sql new file mode 100644 index 0000000..9d3b2b1 --- /dev/null +++ b/packages/db/migrations/20241120222814_add_team_api_key_metadata.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "team_api_keys" table +ALTER TABLE "public"."team_api_keys" + ADD COLUMN IF NOT EXISTS "updated_at" timestamptz NULL, + ADD COLUMN IF NOT EXISTS "name" text NOT NULL DEFAULT 'Unnamed API Key', + ADD COLUMN IF NOT EXISTS "last_used" timestamptz NULL, + ADD COLUMN IF NOT EXISTS "created_by" uuid NULL; + +-- Add constraint separately +ALTER TABLE "public"."team_api_keys" + ADD CONSTRAINT "team_api_keys_users_created_api_keys" + FOREIGN KEY ("created_by") + REFERENCES "auth"."users" ("id") + ON UPDATE NO ACTION + ON DELETE SET NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20241121225404_add_team_api_key_id.sql b/packages/db/migrations/20241121225404_add_team_api_key_id.sql new file mode 100644 index 0000000..3911e1e --- /dev/null +++ b/packages/db/migrations/20241121225404_add_team_api_key_id.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "team_api_keys" table +ALTER TABLE "public"."team_api_keys" DROP CONSTRAINT IF EXISTS "team_api_keys_pkey"; +ALTER TABLE "public"."team_api_keys" ADD COLUMN IF NOT EXISTS "id" uuid NOT NULL DEFAULT gen_random_uuid(); +ALTER TABLE "public"."team_api_keys" ADD PRIMARY KEY ("id"); + +-- Create index if it doesn't exist +CREATE UNIQUE INDEX IF NOT EXISTS "team_api_keys_api_key_key" ON "public"."team_api_keys" ("api_key"); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20241127174604_add_env_creator.sql b/packages/db/migrations/20241127174604_add_env_creator.sql new file mode 100644 index 0000000..e522069 
--- /dev/null +++ b/packages/db/migrations/20241127174604_add_env_creator.sql @@ -0,0 +1,20 @@ +-- +goose Up +-- +goose StatementBegin + +-- Modify "envs" table +ALTER TABLE "public"."envs" + ADD COLUMN IF NOT EXISTS "created_by" uuid NULL; + +-- Add constraint +ALTER TABLE "public"."envs" + ADD CONSTRAINT "envs_users_created_envs" + FOREIGN KEY ("created_by") + REFERENCES "auth"."users" ("id") + ON UPDATE NO ACTION + ON DELETE SET NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20241206124325_add_user_team_adder.sql b/packages/db/migrations/20241206124325_add_user_team_adder.sql new file mode 100644 index 0000000..09423e1 --- /dev/null +++ b/packages/db/migrations/20241206124325_add_user_team_adder.sql @@ -0,0 +1,20 @@ +-- +goose Up +-- +goose StatementBegin + +-- Add column to "users_teams" table +ALTER TABLE "public"."users_teams" + ADD COLUMN IF NOT EXISTS "added_by" uuid NULL; + +-- Add constraint +ALTER TABLE "public"."users_teams" + ADD CONSTRAINT "users_teams_added_by_user" + FOREIGN KEY ("added_by") + REFERENCES "auth"."users" ("id") + ON UPDATE NO ACTION + ON DELETE SET NULL; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20241213142106.sql b/packages/db/migrations/20241213142106_create_snapshots.sql similarity index 55% rename from packages/shared/migrations/20241213142106.sql rename to packages/db/migrations/20241213142106_create_snapshots.sql index b3fd9df..24565bd 100644 --- a/packages/shared/migrations/20241213142106.sql +++ b/packages/db/migrations/20241213142106_create_snapshots.sql @@ -1,12 +1,21 @@ +-- +goose Up +-- +goose StatementBegin + -- Create "snapshots" table -CREATE TABLE "public"."snapshots" -( +CREATE TABLE IF NOT EXISTS "public"."snapshots" ( created_at timestamp with time zone null, env_id text not null, sandbox_id text not null, - id uuid not null
default gen_random_uuid (), + id uuid not null default gen_random_uuid(), metadata jsonb null, base_env_id text not null, constraint snapshots_pkey primary key (id) ); ALTER TABLE "public"."snapshots" ENABLE ROW LEVEL SECURITY; + +COMMIT; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/shared/migrations/20250106142106.sql b/packages/db/migrations/20250106142106_remove_team_is_default.sql similarity index 80% rename from packages/shared/migrations/20250106142106.sql rename to packages/db/migrations/20250106142106_remove_team_is_default.sql index a82af24..9d62b62 100644 --- a/packages/shared/migrations/20250106142106.sql +++ b/packages/db/migrations/20250106142106_remove_team_is_default.sql @@ -1,6 +1,10 @@ +-- +goose Up +-- +goose StatementBegin + -- Alter "teams" table -ALTER TABLE "public"."teams" DROP COLUMN "is_default"; +ALTER TABLE "public"."teams" DROP COLUMN IF EXISTS "is_default"; +-- Create or replace function CREATE OR REPLACE FUNCTION public.post_user_signup() RETURNS TRIGGER LANGUAGE plpgsql @@ -25,3 +29,9 @@ BEGIN RETURN NEW; END $post_user_signup$ SECURITY DEFINER SET search_path = public; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20250206105106_add_snapshot_constraints.sql b/packages/db/migrations/20250206105106_add_snapshot_constraints.sql new file mode 100644 index 0000000..b36ae1f --- /dev/null +++ b/packages/db/migrations/20250206105106_add_snapshot_constraints.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE "public"."snapshots" + ADD CONSTRAINT "snapshots_envs_env_id" + FOREIGN KEY ("env_id") + REFERENCES "public"."envs" ("id") + ON UPDATE NO ACTION + ON DELETE CASCADE; + +ALTER TABLE "public"."snapshots" + ADD CONSTRAINT "snapshots_envs_base_env_id" + FOREIGN KEY ("base_env_id") + REFERENCES "public"."envs" ("id") + ON UPDATE NO ACTION + ON DELETE CASCADE; + 
+COMMIT; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20250211160814_add_token_hashes.sql b/packages/db/migrations/20250211160814_add_token_hashes.sql new file mode 100644 index 0000000..cf74476 --- /dev/null +++ b/packages/db/migrations/20250211160814_add_token_hashes.sql @@ -0,0 +1,24 @@ +-- +goose Up +-- +goose StatementBegin + +-- Add new columns to team_api_keys table +ALTER TABLE team_api_keys + ADD COLUMN IF NOT EXISTS api_key_hash TEXT UNIQUE, + ADD COLUMN IF NOT EXISTS api_key_mask VARCHAR(44); + +-- Add new columns to access_tokens table +ALTER TABLE access_tokens + ADD COLUMN IF NOT EXISTS id UUID DEFAULT gen_random_uuid(), + ADD COLUMN IF NOT EXISTS access_token_hash TEXT UNIQUE, + ADD COLUMN IF NOT EXISTS access_token_mask TEXT, + ADD COLUMN IF NOT EXISTS name TEXT NOT NULL DEFAULT 'Unnamed Access Token'; + +-- Mark sensitive columns as sensitive +COMMENT ON COLUMN team_api_keys.api_key_hash IS 'sensitive'; +COMMENT ON COLUMN access_tokens.access_token_hash IS 'sensitive'; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20250306105106_add_indexes.sql b/packages/db/migrations/20250306105106_add_indexes.sql new file mode 100644 index 0000000..86326e5 --- /dev/null +++ b/packages/db/migrations/20250306105106_add_indexes.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE INDEX IF NOT EXISTS idx_envs_builds_envs ON public.env_builds (env_id); +CREATE INDEX IF NOT EXISTS idx_envs_envs_aliases ON public.env_aliases (env_id); +CREATE INDEX IF NOT EXISTS idx_users_access_tokens ON public.access_tokens (user_id); +CREATE INDEX IF NOT EXISTS idx_teams_envs ON public.envs (team_id); +CREATE INDEX IF NOT EXISTS idx_team_team_api_keys ON public.team_api_keys (team_id); +CREATE INDEX IF NOT EXISTS idx_teams_user_teams ON public.users_teams (team_id); +CREATE INDEX IF NOT EXISTS 
idx_users_user_teams ON public.users_teams (user_id); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20250404151700_add_snapshots_sbx_started_at.sql b/packages/db/migrations/20250404151700_add_snapshots_sbx_started_at.sql new file mode 100644 index 0000000..9565e1b --- /dev/null +++ b/packages/db/migrations/20250404151700_add_snapshots_sbx_started_at.sql @@ -0,0 +1,37 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE "public"."snapshots" + ADD COLUMN IF NOT EXISTS "sandbox_started_at" timestamp with time zone NOT NULL + DEFAULT TIMESTAMP WITH TIME ZONE '1970-01-01 00:00:00 UTC'; + +-- Update records with actual data only if billing schema and table exist +DO $$ +BEGIN + IF EXISTS ( + SELECT 1 FROM information_schema.schemata WHERE schema_name = 'billing' + ) AND EXISTS ( + SELECT 1 FROM information_schema.tables + WHERE table_schema = 'billing' AND table_name = 'sandbox_logs' + ) THEN + UPDATE public.snapshots s + SET sandbox_started_at = latest_starts.started_at + FROM ( + SELECT sandbox_id, MAX(started_at) as started_at + FROM billing.sandbox_logs + GROUP BY sandbox_id + ) latest_starts + WHERE s.sandbox_id = latest_starts.sandbox_id; + END IF; +END $$; + +-- Remove the default constraint after populating data +ALTER TABLE "public"."snapshots" +ALTER COLUMN "sandbox_started_at" DROP DEFAULT; + +COMMIT; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- +goose StatementEnd diff --git a/packages/db/migrations/20250409113306_add_envd_secured_to_snapshot.sql b/packages/db/migrations/20250409113306_add_envd_secured_to_snapshot.sql new file mode 100644 index 0000000..595feb3 --- /dev/null +++ b/packages/db/migrations/20250409113306_add_envd_secured_to_snapshot.sql @@ -0,0 +1,19 @@ +-- +goose Up +-- +goose StatementBegin +BEGIN; + +ALTER TABLE snapshots +ADD COLUMN env_secure boolean NOT NULL DEFAULT false; + +COMMIT; +-- +goose StatementEnd + +-- +goose Down 
+-- +goose StatementBegin +BEGIN; + +ALTER TABLE snapshots +DROP COLUMN IF EXISTS env_secure; + +COMMIT; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250506112836_builds_status_index.sql b/packages/db/migrations/20250506112836_builds_status_index.sql new file mode 100644 index 0000000..5d0c2c6 --- /dev/null +++ b/packages/db/migrations/20250506112836_builds_status_index.sql @@ -0,0 +1,7 @@ +-- +goose NO TRANSACTION +-- +goose Up +-- The index creation takes a lot of time +CREATE INDEX CONCURRENTLY idx_env_builds_status ON public.env_builds(status); + +-- +goose Down +DROP INDEX CONCURRENTLY public.idx_env_builds_status; diff --git a/packages/db/migrations/20250507134356_add_max_specs_to_tier.sql b/packages/db/migrations/20250507134356_add_max_specs_to_tier.sql new file mode 100644 index 0000000..b161e1a --- /dev/null +++ b/packages/db/migrations/20250507134356_add_max_specs_to_tier.sql @@ -0,0 +1,21 @@ +-- +goose Up +-- +goose StatementBegin +BEGIN; + +ALTER TABLE tiers + ADD COLUMN "max_vcpu" bigint NOT NULL default '8'::bigint, + ADD COLUMN "max_ram_mb" bigint NOT NULL DEFAULT '8096'::bigint; + +COMMIT; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +BEGIN; + +ALTER TABLE tiers + DROP COLUMN IF EXISTS "max_vcpu", + DROP COLUMN IF EXISTS "max_ram_mb"; + +COMMIT; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250513111201_tier_fix_max_memory.sql b/packages/db/migrations/20250513111201_tier_fix_max_memory.sql new file mode 100644 index 0000000..08b7d7d --- /dev/null +++ b/packages/db/migrations/20250513111201_tier_fix_max_memory.sql @@ -0,0 +1,12 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE tiers + ALTER COLUMN "max_ram_mb" SET DEFAULT '8192'::bigint; +UPDATE tiers SET "max_ram_mb" = 8192 WHERE "max_ram_mb" = 8096; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE tiers + ALTER COLUMN "max_ram_mb" SET DEFAULT '8096'::bigint; +-- +goose StatementEnd diff --git 
a/packages/db/migrations/20250522105042_users_teams_add_created_at.sql b/packages/db/migrations/20250522105042_users_teams_add_created_at.sql new file mode 100644 index 0000000..b7e11c4 --- /dev/null +++ b/packages/db/migrations/20250522105042_users_teams_add_created_at.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE public.users_teams ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE public.users_teams DROP COLUMN created_at; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250528203546_ready_command.sql b/packages/db/migrations/20250528203546_ready_command.sql new file mode 100644 index 0000000..088de2c --- /dev/null +++ b/packages/db/migrations/20250528203546_ready_command.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE public.env_builds + ADD COLUMN ready_cmd TEXT; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE public.env_builds +DROP COLUMN ready_cmd; +-- +goose StatementEnd \ No newline at end of file diff --git a/packages/db/migrations/20250606204750_optimize_hashed_key_schema.sql b/packages/db/migrations/20250606204750_optimize_hashed_key_schema.sql new file mode 100644 index 0000000..53e5447 --- /dev/null +++ b/packages/db/migrations/20250606204750_optimize_hashed_key_schema.sql @@ -0,0 +1,38 @@ +-- +goose Up +-- +goose StatementBegin + +-- Add new columns to team_api_keys table +ALTER TABLE team_api_keys + ADD COLUMN IF NOT EXISTS api_key_prefix VARCHAR(10), + ADD COLUMN IF NOT EXISTS api_key_length INTEGER, + ADD COLUMN IF NOT EXISTS api_key_mask_prefix VARCHAR(5), + ADD COLUMN IF NOT EXISTS api_key_mask_suffix VARCHAR(5); + +-- Add new columns to access_tokens table +ALTER TABLE access_tokens + ADD COLUMN IF NOT EXISTS access_token_prefix VARCHAR(10), + ADD COLUMN IF NOT EXISTS access_token_length INTEGER, + ADD COLUMN IF NOT EXISTS access_token_mask_prefix VARCHAR(5), 
+ ADD COLUMN IF NOT EXISTS access_token_mask_suffix VARCHAR(5); + + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- Remove the added columns from team_api_keys table +ALTER TABLE team_api_keys +DROP COLUMN IF EXISTS api_key_prefix, + DROP COLUMN IF EXISTS api_key_length, + DROP COLUMN IF EXISTS api_key_mask_prefix, + DROP COLUMN IF EXISTS api_key_mask_suffix; + +-- Remove the added columns from access_tokens table +ALTER TABLE access_tokens +DROP COLUMN IF EXISTS access_token_prefix, + DROP COLUMN IF EXISTS access_token_length, + DROP COLUMN IF EXISTS access_token_mask_prefix, + DROP COLUMN IF EXISTS access_token_mask_suffix; + +-- +goose StatementEnd diff --git a/packages/db/migrations/20250606213446_deployment_cluster.sql b/packages/db/migrations/20250606213446_deployment_cluster.sql new file mode 100644 index 0000000..12d28ce --- /dev/null +++ b/packages/db/migrations/20250606213446_deployment_cluster.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE IF NOT EXISTS clusters ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + endpoint TEXT NOT NULL, + endpoint_tls BOOLEAN NOT NULL DEFAULT TRUE, + token TEXT NOT NULL +); + +ALTER TABLE teams + ADD COLUMN IF NOT EXISTS cluster_id UUID NULL + REFERENCES clusters(id); + +CREATE INDEX IF NOT EXISTS teams_cluster_id_uq + ON teams (cluster_id) + WHERE cluster_id IS NOT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS teams_cluster_id_uq; + +ALTER TABLE teams DROP COLUMN IF EXISTS cluster_id; + +DROP TABLE IF EXISTS clusters CASCADE; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250624001047_deploy_cluster_policy.sql b/packages/db/migrations/20250624001047_deploy_cluster_policy.sql new file mode 100644 index 0000000..7bd185e --- /dev/null +++ b/packages/db/migrations/20250624001047_deploy_cluster_policy.sql @@ -0,0 +1,9 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE "public"."clusters" ENABLE ROW 
LEVEL SECURITY; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE "public"."clusters" DISABLE ROW LEVEL SECURITY; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250624001048_cluster_for_templates.sql b/packages/db/migrations/20250624001048_cluster_for_templates.sql new file mode 100644 index 0000000..beb9e13 --- /dev/null +++ b/packages/db/migrations/20250624001048_cluster_for_templates.sql @@ -0,0 +1,17 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE envs + ADD COLUMN IF NOT EXISTS cluster_id UUID NULL + REFERENCES clusters(id); + +CREATE INDEX IF NOT EXISTS envs_cluster_id + ON envs (cluster_id) + WHERE cluster_id IS NOT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP INDEX IF EXISTS envs_cluster_id; + +ALTER TABLE envs DROP COLUMN IF EXISTS cluster_id; +-- +goose StatementEnd diff --git a/packages/db/migrations/20250624001049_cluster_for_builds.sql b/packages/db/migrations/20250624001049_cluster_for_builds.sql new file mode 100644 index 0000000..7e442a1 --- /dev/null +++ b/packages/db/migrations/20250624001049_cluster_for_builds.sql @@ -0,0 +1,10 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE env_builds + ADD COLUMN IF NOT EXISTS cluster_node_id TEXT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE env_builds DROP COLUMN IF EXISTS cluster_node_id; +-- +goose StatementEnd diff --git a/packages/db/queries/db.go b/packages/db/queries/db.go new file mode 100644 index 0000000..0591efe --- /dev/null +++ b/packages/db/queries/db.go @@ -0,0 +1,32 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 + +package queries + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" +) + +type DBTX interface { + Exec(context.Context, string, ...interface{}) (pgconn.CommandTag, error) + Query(context.Context, string, ...interface{}) (pgx.Rows, error) + QueryRow(context.Context, string, ...interface{}) pgx.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx pgx.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/packages/db/queries/get_active_clusters.sql b/packages/db/queries/get_active_clusters.sql new file mode 100644 index 0000000..4333048 --- /dev/null +++ b/packages/db/queries/get_active_clusters.sql @@ -0,0 +1,4 @@ +-- name: GetActiveClusters :many +SELECT DISTINCT sqlc.embed(c) +FROM public.clusters c +JOIN public.teams t ON t.cluster_id = c.id; diff --git a/packages/db/queries/get_active_clusters.sql.go b/packages/db/queries/get_active_clusters.sql.go new file mode 100644 index 0000000..c51cdb9 --- /dev/null +++ b/packages/db/queries/get_active_clusters.sql.go @@ -0,0 +1,45 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: get_active_clusters.sql + +package queries + +import ( + "context" +) + +const getActiveClusters = `-- name: GetActiveClusters :many +SELECT DISTINCT c.id, c.endpoint, c.endpoint_tls, c.token +FROM public.clusters c +JOIN public.teams t ON t.cluster_id = c.id +` + +type GetActiveClustersRow struct { + Cluster Cluster +} + +func (q *Queries) GetActiveClusters(ctx context.Context) ([]GetActiveClustersRow, error) { + rows, err := q.db.Query(ctx, getActiveClusters) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetActiveClustersRow + for rows.Next() { + var i GetActiveClustersRow + if err := rows.Scan( + &i.Cluster.ID, + &i.Cluster.Endpoint, + &i.Cluster.EndpointTls, + &i.Cluster.Token, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/queries/get_env_with_build.sql b/packages/db/queries/get_env_with_build.sql new file mode 100644 index 0000000..4f24001 --- /dev/null +++ b/packages/db/queries/get_env_with_build.sql @@ -0,0 +1,22 @@ +-- name: GetEnvWithBuild :one +-- get the env_id when querying by alias; if not, @alias_or_env_id should be env_id +WITH s AS NOT MATERIALIZED ( + SELECT ea.env_id as env_id + FROM public.env_aliases as ea + WHERE ea.alias = @alias_or_env_id + UNION + SELECT @alias_or_env_id as env_id +) + +SELECT sqlc.embed(e), sqlc.embed(eb), aliases +FROM s +JOIN public.envs AS e ON e.id = s.env_id +JOIN public.env_builds AS eb ON eb.env_id = e.id +AND eb.status = 'uploaded' +CROSS JOIN LATERAL ( + SELECT array_agg(alias)::text[] AS aliases + FROM public.env_aliases + WHERE env_id = e.id +) AS al +ORDER BY eb.finished_at DESC +LIMIT 1; diff --git a/packages/db/queries/get_env_with_build.sql.go b/packages/db/queries/get_env_with_build.sql.go new file mode 100644 index 0000000..bb34e17 --- /dev/null +++ b/packages/db/queries/get_env_with_build.sql.go @@ -0,0 
+1,76 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: get_env_with_build.sql + +package queries + +import ( + "context" +) + +const getEnvWithBuild = `-- name: GetEnvWithBuild :one +WITH s AS NOT MATERIALIZED ( + SELECT ea.env_id as env_id + FROM public.env_aliases as ea + WHERE ea.alias = $1 + UNION + SELECT $1 as env_id +) + +SELECT e.id, e.created_at, e.updated_at, e.public, e.build_count, e.spawn_count, e.last_spawned_at, e.team_id, e.created_by, e.cluster_id, eb.id, eb.created_at, eb.updated_at, eb.finished_at, eb.status, eb.dockerfile, eb.start_cmd, eb.vcpu, eb.ram_mb, eb.free_disk_size_mb, eb.total_disk_size_mb, eb.kernel_version, eb.firecracker_version, eb.env_id, eb.envd_version, eb.ready_cmd, eb.cluster_node_id, aliases +FROM s +JOIN public.envs AS e ON e.id = s.env_id +JOIN public.env_builds AS eb ON eb.env_id = e.id +AND eb.status = 'uploaded' +CROSS JOIN LATERAL ( + SELECT array_agg(alias)::text[] AS aliases + FROM public.env_aliases + WHERE env_id = e.id +) AS al +ORDER BY eb.finished_at DESC +LIMIT 1 +` + +type GetEnvWithBuildRow struct { + Env Env + EnvBuild EnvBuild + Aliases []string +} + +// get the env_id when querying by alias; if not, @alias_or_env_id should be env_id +func (q *Queries) GetEnvWithBuild(ctx context.Context, aliasOrEnvID string) (GetEnvWithBuildRow, error) { + row := q.db.QueryRow(ctx, getEnvWithBuild, aliasOrEnvID) + var i GetEnvWithBuildRow + err := row.Scan( + &i.Env.ID, + &i.Env.CreatedAt, + &i.Env.UpdatedAt, + &i.Env.Public, + &i.Env.BuildCount, + &i.Env.SpawnCount, + &i.Env.LastSpawnedAt, + &i.Env.TeamID, + &i.Env.CreatedBy, + &i.Env.ClusterID, + &i.EnvBuild.ID, + &i.EnvBuild.CreatedAt, + &i.EnvBuild.UpdatedAt, + &i.EnvBuild.FinishedAt, + &i.EnvBuild.Status, + &i.EnvBuild.Dockerfile, + &i.EnvBuild.StartCmd, + &i.EnvBuild.Vcpu, + &i.EnvBuild.RamMb, + &i.EnvBuild.FreeDiskSizeMb, + &i.EnvBuild.TotalDiskSizeMb, + &i.EnvBuild.KernelVersion, + &i.EnvBuild.FirecrackerVersion, + 
&i.EnvBuild.EnvID, + &i.EnvBuild.EnvdVersion, + &i.EnvBuild.ReadyCmd, + &i.EnvBuild.ClusterNodeID, + &i.Aliases, + ) + return i, err +} diff --git a/packages/db/queries/get_inprogress_builds.sql b/packages/db/queries/get_inprogress_builds.sql new file mode 100644 index 0000000..4bfe261 --- /dev/null +++ b/packages/db/queries/get_inprogress_builds.sql @@ -0,0 +1,8 @@ +-- name: GetInProgressTemplateBuilds :many +SELECT sqlc.embed(t), sqlc.embed(e), sqlc.embed(b) +FROM public.env_builds b +JOIN public.envs e ON e.id = b.env_id +JOIN public.teams t ON e.team_id = t.id +WHERE b.status = 'waiting' OR b.status = 'building' +ORDER BY b.created_at DESC; + diff --git a/packages/db/queries/get_inprogress_builds.sql.go b/packages/db/queries/get_inprogress_builds.sql.go new file mode 100644 index 0000000..c5c6afd --- /dev/null +++ b/packages/db/queries/get_inprogress_builds.sql.go @@ -0,0 +1,82 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: get_inprogress_builds.sql + +package queries + +import ( + "context" +) + +const getInProgressTemplateBuilds = `-- name: GetInProgressTemplateBuilds :many +SELECT t.id, t.created_at, t.is_blocked, t.name, t.tier, t.email, t.is_banned, t.blocked_reason, t.cluster_id, e.id, e.created_at, e.updated_at, e.public, e.build_count, e.spawn_count, e.last_spawned_at, e.team_id, e.created_by, e.cluster_id, b.id, b.created_at, b.updated_at, b.finished_at, b.status, b.dockerfile, b.start_cmd, b.vcpu, b.ram_mb, b.free_disk_size_mb, b.total_disk_size_mb, b.kernel_version, b.firecracker_version, b.env_id, b.envd_version, b.ready_cmd, b.cluster_node_id +FROM public.env_builds b +JOIN public.envs e ON e.id = b.env_id +JOIN public.teams t ON e.team_id = t.id +WHERE b.status = 'waiting' OR b.status = 'building' +ORDER BY b.created_at DESC +` + +type GetInProgressTemplateBuildsRow struct { + Team Team + Env Env + EnvBuild EnvBuild +} + +func (q *Queries) GetInProgressTemplateBuilds(ctx context.Context) 
([]GetInProgressTemplateBuildsRow, error) { + rows, err := q.db.Query(ctx, getInProgressTemplateBuilds) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetInProgressTemplateBuildsRow + for rows.Next() { + var i GetInProgressTemplateBuildsRow + if err := rows.Scan( + &i.Team.ID, + &i.Team.CreatedAt, + &i.Team.IsBlocked, + &i.Team.Name, + &i.Team.Tier, + &i.Team.Email, + &i.Team.IsBanned, + &i.Team.BlockedReason, + &i.Team.ClusterID, + &i.Env.ID, + &i.Env.CreatedAt, + &i.Env.UpdatedAt, + &i.Env.Public, + &i.Env.BuildCount, + &i.Env.SpawnCount, + &i.Env.LastSpawnedAt, + &i.Env.TeamID, + &i.Env.CreatedBy, + &i.Env.ClusterID, + &i.EnvBuild.ID, + &i.EnvBuild.CreatedAt, + &i.EnvBuild.UpdatedAt, + &i.EnvBuild.FinishedAt, + &i.EnvBuild.Status, + &i.EnvBuild.Dockerfile, + &i.EnvBuild.StartCmd, + &i.EnvBuild.Vcpu, + &i.EnvBuild.RamMb, + &i.EnvBuild.FreeDiskSizeMb, + &i.EnvBuild.TotalDiskSizeMb, + &i.EnvBuild.KernelVersion, + &i.EnvBuild.FirecrackerVersion, + &i.EnvBuild.EnvID, + &i.EnvBuild.EnvdVersion, + &i.EnvBuild.ReadyCmd, + &i.EnvBuild.ClusterNodeID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/queries/get_last_snapshot.sql b/packages/db/queries/get_last_snapshot.sql new file mode 100644 index 0000000..6ce496f --- /dev/null +++ b/packages/db/queries/get_last_snapshot.sql @@ -0,0 +1,13 @@ +-- name: GetLastSnapshot :one +SELECT COALESCE(ea.aliases, ARRAY[]::text[])::text[] AS aliases, sqlc.embed(s), sqlc.embed(eb) +FROM "public"."snapshots" s +JOIN "public"."envs" e ON s.env_id = e.id +JOIN "public"."env_builds" eb ON e.id = eb.env_id +LEFT JOIN LATERAL ( + SELECT ARRAY_AGG(alias ORDER BY alias) AS aliases + FROM "public"."env_aliases" + WHERE env_id = s.base_env_id +) ea ON TRUE +WHERE s.sandbox_id = $1 AND eb.status = 'success' AND e.team_id = $2 +ORDER BY eb.finished_at DESC +LIMIT 1; diff --git 
a/packages/db/queries/get_last_snapshot.sql.go b/packages/db/queries/get_last_snapshot.sql.go new file mode 100644 index 0000000..6232273 --- /dev/null +++ b/packages/db/queries/get_last_snapshot.sql.go @@ -0,0 +1,72 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: get_last_snapshot.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const getLastSnapshot = `-- name: GetLastSnapshot :one +SELECT COALESCE(ea.aliases, ARRAY[]::text[])::text[] AS aliases, s.created_at, s.env_id, s.sandbox_id, s.id, s.metadata, s.base_env_id, s.sandbox_started_at, s.env_secure, eb.id, eb.created_at, eb.updated_at, eb.finished_at, eb.status, eb.dockerfile, eb.start_cmd, eb.vcpu, eb.ram_mb, eb.free_disk_size_mb, eb.total_disk_size_mb, eb.kernel_version, eb.firecracker_version, eb.env_id, eb.envd_version, eb.ready_cmd, eb.cluster_node_id +FROM "public"."snapshots" s +JOIN "public"."envs" e ON s.env_id = e.id +JOIN "public"."env_builds" eb ON e.id = eb.env_id +LEFT JOIN LATERAL ( + SELECT ARRAY_AGG(alias ORDER BY alias) AS aliases + FROM "public"."env_aliases" + WHERE env_id = s.base_env_id +) ea ON TRUE +WHERE s.sandbox_id = $1 AND eb.status = 'success' AND e.team_id = $2 +ORDER BY eb.finished_at DESC +LIMIT 1 +` + +type GetLastSnapshotParams struct { + SandboxID string + TeamID uuid.UUID +} + +type GetLastSnapshotRow struct { + Aliases []string + Snapshot Snapshot + EnvBuild EnvBuild +} + +func (q *Queries) GetLastSnapshot(ctx context.Context, arg GetLastSnapshotParams) (GetLastSnapshotRow, error) { + row := q.db.QueryRow(ctx, getLastSnapshot, arg.SandboxID, arg.TeamID) + var i GetLastSnapshotRow + err := row.Scan( + &i.Aliases, + &i.Snapshot.CreatedAt, + &i.Snapshot.EnvID, + &i.Snapshot.SandboxID, + &i.Snapshot.ID, + &i.Snapshot.Metadata, + &i.Snapshot.BaseEnvID, + &i.Snapshot.SandboxStartedAt, + &i.Snapshot.EnvSecure, + &i.EnvBuild.ID, + &i.EnvBuild.CreatedAt, + &i.EnvBuild.UpdatedAt, + &i.EnvBuild.FinishedAt, + 
&i.EnvBuild.Status, + &i.EnvBuild.Dockerfile, + &i.EnvBuild.StartCmd, + &i.EnvBuild.Vcpu, + &i.EnvBuild.RamMb, + &i.EnvBuild.FreeDiskSizeMb, + &i.EnvBuild.TotalDiskSizeMb, + &i.EnvBuild.KernelVersion, + &i.EnvBuild.FirecrackerVersion, + &i.EnvBuild.EnvID, + &i.EnvBuild.EnvdVersion, + &i.EnvBuild.ReadyCmd, + &i.EnvBuild.ClusterNodeID, + ) + return i, err +} diff --git a/packages/db/queries/get_snapshots_with_cursor.sql b/packages/db/queries/get_snapshots_with_cursor.sql new file mode 100644 index 0000000..8e2fabe --- /dev/null +++ b/packages/db/queries/get_snapshots_with_cursor.sql @@ -0,0 +1,29 @@ +-- name: GetSnapshotsWithCursor :many +SELECT COALESCE(ea.aliases, ARRAY[]::text[])::text[] AS aliases, sqlc.embed(s), sqlc.embed(eb) +FROM "public"."snapshots" s +JOIN "public"."envs" e ON e.id = s.env_id +LEFT JOIN LATERAL ( + SELECT ARRAY_AGG(alias ORDER BY alias) AS aliases + FROM "public"."env_aliases" + WHERE env_id = s.base_env_id +) ea ON TRUE +JOIN LATERAL ( + SELECT eb.* + FROM "public"."env_builds" eb + WHERE + eb.env_id = s.env_id + AND eb.status = 'success' + ORDER BY eb.created_at DESC + LIMIT 1 +) eb ON TRUE +WHERE + e.team_id = @team_id + AND s.metadata @> @metadata + AND ( + s.created_at < @cursor_time + OR + (s.created_at = @cursor_time AND s.sandbox_id > @cursor_id) + ) + AND NOT (s.sandbox_id = ANY (@snapshot_exclude_sbx_ids::text[])) +ORDER BY s.created_at DESC, s.sandbox_id +LIMIT $1; diff --git a/packages/db/queries/get_snapshots_with_cursor.sql.go b/packages/db/queries/get_snapshots_with_cursor.sql.go new file mode 100644 index 0000000..356d0d9 --- /dev/null +++ b/packages/db/queries/get_snapshots_with_cursor.sql.go @@ -0,0 +1,114 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: get_snapshots_with_cursor.sql + +package queries + +import ( + "context" + + "github.com/e2b-dev/infra/packages/db/types" + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" +) + +const getSnapshotsWithCursor = `-- name: GetSnapshotsWithCursor :many +SELECT COALESCE(ea.aliases, ARRAY[]::text[])::text[] AS aliases, s.created_at, s.env_id, s.sandbox_id, s.id, s.metadata, s.base_env_id, s.sandbox_started_at, s.env_secure, eb.id, eb.created_at, eb.updated_at, eb.finished_at, eb.status, eb.dockerfile, eb.start_cmd, eb.vcpu, eb.ram_mb, eb.free_disk_size_mb, eb.total_disk_size_mb, eb.kernel_version, eb.firecracker_version, eb.env_id, eb.envd_version, eb.ready_cmd, eb.cluster_node_id +FROM "public"."snapshots" s +JOIN "public"."envs" e ON e.id = s.env_id +LEFT JOIN LATERAL ( + SELECT ARRAY_AGG(alias ORDER BY alias) AS aliases + FROM "public"."env_aliases" + WHERE env_id = s.base_env_id +) ea ON TRUE +JOIN LATERAL ( + SELECT eb.id, eb.created_at, eb.updated_at, eb.finished_at, eb.status, eb.dockerfile, eb.start_cmd, eb.vcpu, eb.ram_mb, eb.free_disk_size_mb, eb.total_disk_size_mb, eb.kernel_version, eb.firecracker_version, eb.env_id, eb.envd_version, eb.ready_cmd, eb.cluster_node_id + FROM "public"."env_builds" eb + WHERE + eb.env_id = s.env_id + AND eb.status = 'success' + ORDER BY eb.created_at DESC + LIMIT 1 +) eb ON TRUE +WHERE + e.team_id = $2 + AND s.metadata @> $3 + AND ( + s.created_at < $4 + OR + (s.created_at = $4 AND s.sandbox_id > $5) + ) + AND NOT (s.sandbox_id = ANY ($6::text[])) +ORDER BY s.created_at DESC, s.sandbox_id +LIMIT $1 +` + +type GetSnapshotsWithCursorParams struct { + Limit int32 + TeamID uuid.UUID + Metadata types.JSONBStringMap + CursorTime pgtype.Timestamptz + CursorID string + SnapshotExcludeSbxIds []string +} + +type GetSnapshotsWithCursorRow struct { + Aliases []string + Snapshot Snapshot + EnvBuild EnvBuild +} + +func (q *Queries) GetSnapshotsWithCursor(ctx context.Context, arg 
GetSnapshotsWithCursorParams) ([]GetSnapshotsWithCursorRow, error) { + rows, err := q.db.Query(ctx, getSnapshotsWithCursor, + arg.Limit, + arg.TeamID, + arg.Metadata, + arg.CursorTime, + arg.CursorID, + arg.SnapshotExcludeSbxIds, + ) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetSnapshotsWithCursorRow + for rows.Next() { + var i GetSnapshotsWithCursorRow + if err := rows.Scan( + &i.Aliases, + &i.Snapshot.CreatedAt, + &i.Snapshot.EnvID, + &i.Snapshot.SandboxID, + &i.Snapshot.ID, + &i.Snapshot.Metadata, + &i.Snapshot.BaseEnvID, + &i.Snapshot.SandboxStartedAt, + &i.Snapshot.EnvSecure, + &i.EnvBuild.ID, + &i.EnvBuild.CreatedAt, + &i.EnvBuild.UpdatedAt, + &i.EnvBuild.FinishedAt, + &i.EnvBuild.Status, + &i.EnvBuild.Dockerfile, + &i.EnvBuild.StartCmd, + &i.EnvBuild.Vcpu, + &i.EnvBuild.RamMb, + &i.EnvBuild.FreeDiskSizeMb, + &i.EnvBuild.TotalDiskSizeMb, + &i.EnvBuild.KernelVersion, + &i.EnvBuild.FirecrackerVersion, + &i.EnvBuild.EnvID, + &i.EnvBuild.EnvdVersion, + &i.EnvBuild.ReadyCmd, + &i.EnvBuild.ClusterNodeID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/queries/models.go b/packages/db/queries/models.go new file mode 100644 index 0000000..92a15d9 --- /dev/null +++ b/packages/db/queries/models.go @@ -0,0 +1,137 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 + +package queries + +import ( + "time" + + "github.com/e2b-dev/infra/packages/db/types" + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" +) + +type AccessToken struct { + AccessToken string + UserID uuid.UUID + CreatedAt time.Time + ID *uuid.UUID + // sensitive + AccessTokenHash *string + AccessTokenMask *string + Name string + AccessTokenPrefix *string + AccessTokenLength *int32 + AccessTokenMaskPrefix *string + AccessTokenMaskSuffix *string +} + +type Cluster struct { + ID uuid.UUID + Endpoint string + EndpointTls bool + Token string +} + +type Env struct { + ID string + CreatedAt time.Time + UpdatedAt time.Time + Public bool + BuildCount int32 + // Number of times the env was spawned + SpawnCount int64 + // Timestamp of the last time the env was spawned + LastSpawnedAt *time.Time + TeamID uuid.UUID + CreatedBy *uuid.UUID + ClusterID *uuid.UUID +} + +type EnvAlias struct { + Alias string + IsRenamable bool + EnvID string +} + +type EnvBuild struct { + ID uuid.UUID + CreatedAt time.Time + UpdatedAt time.Time + FinishedAt *time.Time + Status string + Dockerfile *string + StartCmd *string + Vcpu int64 + RamMb int64 + FreeDiskSizeMb int64 + TotalDiskSizeMb *int64 + KernelVersion string + FirecrackerVersion string + EnvID *string + EnvdVersion *string + ReadyCmd *string + ClusterNodeID *string +} + +type Snapshot struct { + CreatedAt pgtype.Timestamptz + EnvID string + SandboxID string + ID uuid.UUID + Metadata types.JSONBStringMap + BaseEnvID string + SandboxStartedAt pgtype.Timestamptz + EnvSecure bool +} + +type Team struct { + ID uuid.UUID + CreatedAt time.Time + IsBlocked bool + Name string + Tier string + Email string + IsBanned bool + BlockedReason *string + ClusterID *uuid.UUID +} + +type TeamApiKey struct { + ApiKey string + CreatedAt time.Time + TeamID uuid.UUID + UpdatedAt *time.Time + Name string + LastUsed *time.Time + CreatedBy *uuid.UUID + ID uuid.UUID + // sensitive + ApiKeyHash *string + ApiKeyMask *string 
+ ApiKeyPrefix *string + ApiKeyLength *int32 + ApiKeyMaskPrefix *string + ApiKeyMaskSuffix *string +} + +type Tier struct { + ID string + Name string + DiskMb int64 + // The number of instances the team can run concurrently + ConcurrentInstances int64 + MaxLengthHours int64 + MaxVcpu int64 + MaxRamMb int64 +} + +type UsersTeam struct { + ID int64 + UserID uuid.UUID + TeamID uuid.UUID + IsDefault bool + AddedBy *uuid.UUID + CreatedAt pgtype.Timestamp +} diff --git a/packages/db/queries/teams___tiers__usersteams.sql b/packages/db/queries/teams___tiers__usersteams.sql new file mode 100644 index 0000000..1ec6749 --- /dev/null +++ b/packages/db/queries/teams___tiers__usersteams.sql @@ -0,0 +1,6 @@ +-- name: GetTeamsWithUsersTeamsWithTier :many +SELECT sqlc.embed(t), sqlc.embed(ut), sqlc.embed(tier) +FROM "public"."teams" t +JOIN "public"."tiers" tier ON t.tier = tier.id +JOIN "public"."users_teams" ut ON ut.team_id = t.id +WHERE ut.user_id = $1; diff --git a/packages/db/queries/teams___tiers__usersteams.sql.go b/packages/db/queries/teams___tiers__usersteams.sql.go new file mode 100644 index 0000000..4060b61 --- /dev/null +++ b/packages/db/queries/teams___tiers__usersteams.sql.go @@ -0,0 +1,69 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: teams___tiers__usersteams.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const getTeamsWithUsersTeamsWithTier = `-- name: GetTeamsWithUsersTeamsWithTier :many +SELECT t.id, t.created_at, t.is_blocked, t.name, t.tier, t.email, t.is_banned, t.blocked_reason, t.cluster_id, ut.id, ut.user_id, ut.team_id, ut.is_default, ut.added_by, ut.created_at, tier.id, tier.name, tier.disk_mb, tier.concurrent_instances, tier.max_length_hours, tier.max_vcpu, tier.max_ram_mb +FROM "public"."teams" t +JOIN "public"."tiers" tier ON t.tier = tier.id +JOIN "public"."users_teams" ut ON ut.team_id = t.id +WHERE ut.user_id = $1 +` + +type GetTeamsWithUsersTeamsWithTierRow struct { + Team Team + UsersTeam UsersTeam + Tier Tier +} + +func (q *Queries) GetTeamsWithUsersTeamsWithTier(ctx context.Context, userID uuid.UUID) ([]GetTeamsWithUsersTeamsWithTierRow, error) { + rows, err := q.db.Query(ctx, getTeamsWithUsersTeamsWithTier, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetTeamsWithUsersTeamsWithTierRow + for rows.Next() { + var i GetTeamsWithUsersTeamsWithTierRow + if err := rows.Scan( + &i.Team.ID, + &i.Team.CreatedAt, + &i.Team.IsBlocked, + &i.Team.Name, + &i.Team.Tier, + &i.Team.Email, + &i.Team.IsBanned, + &i.Team.BlockedReason, + &i.Team.ClusterID, + &i.UsersTeam.ID, + &i.UsersTeam.UserID, + &i.UsersTeam.TeamID, + &i.UsersTeam.IsDefault, + &i.UsersTeam.AddedBy, + &i.UsersTeam.CreatedAt, + &i.Tier.ID, + &i.Tier.Name, + &i.Tier.DiskMb, + &i.Tier.ConcurrentInstances, + &i.Tier.MaxLengthHours, + &i.Tier.MaxVcpu, + &i.Tier.MaxRamMb, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/queries/teams_usersteams_join.sql b/packages/db/queries/teams_usersteams_join.sql new file mode 100644 index 0000000..e6654dc --- /dev/null +++ 
b/packages/db/queries/teams_usersteams_join.sql @@ -0,0 +1,5 @@ +-- name: GetTeamsWithUsersTeams :many +SELECT sqlc.embed(t), sqlc.embed(ut) +FROM "public"."teams" t +JOIN "public"."users_teams" ut ON ut.team_id = t.id +WHERE ut.user_id = $1; diff --git a/packages/db/queries/teams_usersteams_join.sql.go b/packages/db/queries/teams_usersteams_join.sql.go new file mode 100644 index 0000000..31d5561 --- /dev/null +++ b/packages/db/queries/teams_usersteams_join.sql.go @@ -0,0 +1,60 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: teams_usersteams_join.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const getTeamsWithUsersTeams = `-- name: GetTeamsWithUsersTeams :many +SELECT t.id, t.created_at, t.is_blocked, t.name, t.tier, t.email, t.is_banned, t.blocked_reason, t.cluster_id, ut.id, ut.user_id, ut.team_id, ut.is_default, ut.added_by, ut.created_at +FROM "public"."teams" t +JOIN "public"."users_teams" ut ON ut.team_id = t.id +WHERE ut.user_id = $1 +` + +type GetTeamsWithUsersTeamsRow struct { + Team Team + UsersTeam UsersTeam +} + +func (q *Queries) GetTeamsWithUsersTeams(ctx context.Context, userID uuid.UUID) ([]GetTeamsWithUsersTeamsRow, error) { + rows, err := q.db.Query(ctx, getTeamsWithUsersTeams, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetTeamsWithUsersTeamsRow + for rows.Next() { + var i GetTeamsWithUsersTeamsRow + if err := rows.Scan( + &i.Team.ID, + &i.Team.CreatedAt, + &i.Team.IsBlocked, + &i.Team.Name, + &i.Team.Tier, + &i.Team.Email, + &i.Team.IsBanned, + &i.Team.BlockedReason, + &i.Team.ClusterID, + &i.UsersTeam.ID, + &i.UsersTeam.UserID, + &i.UsersTeam.TeamID, + &i.UsersTeam.IsDefault, + &i.UsersTeam.AddedBy, + &i.UsersTeam.CreatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/scripts/migrator.go 
b/packages/db/scripts/migrator.go new file mode 100644 index 0000000..1ae09c5 --- /dev/null +++ b/packages/db/scripts/migrator.go @@ -0,0 +1,75 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "log" + "os" + + _ "github.com/lib/pq" + "github.com/pressly/goose/v3" + "github.com/pressly/goose/v3/database" + "github.com/pressly/goose/v3/lock" +) + +const ( + trackingTable = "_migrations" + migrationsDir = "./migrations" +) + +func main() { + fmt.Printf("Starting migrations...\n") + ctx := context.Background() + + dbString := os.Getenv("POSTGRES_CONNECTION_STRING") + if dbString == "" { + log.Fatal("Database connection string is required. Set POSTGRES_CONNECTION_STRING env var.") + } + + db, err := sql.Open("postgres", dbString) + if err != nil { + log.Fatalf("failed to open DB: %v", err) + } + defer func() { + err := db.Close() + if err != nil { + log.Printf("failed to close DB: %v\n", err) + } + }() + + // Create a session locking + sessionLocker, err := lock.NewPostgresSessionLocker() + if err != nil { + log.Fatalf("failed to create session locker: %v", err) + } + + // We have to use custom store to use a custom tracking table + store, err := database.NewStore(goose.DialectPostgres, trackingTable) + if err != nil { + log.Fatalf("failed to create database store: %v", err) + } + + migrationsFS := os.DirFS(migrationsDir) + provider, err := goose.NewProvider( + "", // Has to empty when using a custom store + db, + migrationsFS, + goose.WithStore(store), + goose.WithSessionLocker(sessionLocker), + ) + if err != nil { + log.Fatalf("failed to create goose provider: %v", err) + } + + results, err := provider.Up(ctx) + if err != nil { + log.Fatalf("failed to apply migrations: %v", err) + } + + for _, res := range results { + fmt.Printf("Applied migration %s %s (%s)\n", res.Direction, res.Source.Path, res.Duration) + } + + fmt.Println("Migrations applied successfully.") +} diff --git a/packages/db/sqlc.yaml b/packages/db/sqlc.yaml new file mode 100644 index 
0000000..95f64e0 --- /dev/null +++ b/packages/db/sqlc.yaml @@ -0,0 +1,41 @@ +version: "2" +sql: + - engine: "postgresql" + queries: "queries" + schema: "migrations" + gen: + go: + emit_pointers_for_null_types: true + package: "queries" + out: "queries/" + sql_package: "pgx/v5" + overrides: + - db_type: "uuid" + go_type: + import: "github.com/google/uuid" + type: "UUID" + - db_type: "uuid" + nullable: true + go_type: + import: "github.com/google/uuid" + type: "UUID" + pointer: true + + - db_type: "pg_catalog.numeric" + go_type: "github.com/shopspring/decimal.Decimal" + - db_type: "pg_catalog.numeric" + nullable: true + go_type: "*github.com/shopspring/decimal.Decimal" + + - db_type: "timestamptz" + go_type: "time.Time" + - db_type: "timestamptz" + go_type: + import: "time" + type: "Time" + pointer: true + nullable: true + + - db_type: "jsonb" + go_type: "github.com/e2b-dev/infra/packages/db/types.JSONBStringMap" + nullable: true diff --git a/packages/db/types/types.go b/packages/db/types/types.go new file mode 100644 index 0000000..3b6c73a --- /dev/null +++ b/packages/db/types/types.go @@ -0,0 +1,3 @@ +package types + +type JSONBStringMap map[string]string diff --git a/packages/envd/go.mod b/packages/envd/go.mod index 1ab345e..ff2a75a 100644 --- a/packages/envd/go.mod +++ b/packages/envd/go.mod @@ -1,27 +1,27 @@ module github.com/e2b-dev/infra/packages/envd -go 1.24 +go 1.24.3 require ( connectrpc.com/authn v0.1.0 - connectrpc.com/connect v1.16.2 + connectrpc.com/connect v1.18.1 connectrpc.com/cors v0.1.0 github.com/creack/pty v1.1.18 - github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/e2b-dev/fsnotify v0.0.0-20241216145137-2fe5d32bcb51 + github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/go-chi/chi/v5 v5.0.12 github.com/oapi-codegen/oapi-codegen/v2 v2.4.1 github.com/oapi-codegen/runtime v1.1.1 github.com/rs/cors v1.11.0 github.com/rs/zerolog v1.33.0 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.36.6 ) require ( 
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/dchest/uniuri v1.2.0 // indirect github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 // indirect - github.com/getkin/kin-openapi latest // indirect + github.com/getkin/kin-openapi v0.132.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/google/uuid v1.6.0 // indirect @@ -34,9 +34,9 @@ require ( github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/speakeasy-api/openapi-overlay v0.9.0 // indirect github.com/vmware-labs/yaml-jsonpath v0.3.2 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.25.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/packages/envd/go.sum b/packages/envd/go.sum index c6c9217..96d1502 100644 --- a/packages/envd/go.sum +++ b/packages/envd/go.sum @@ -2,6 +2,7 @@ connectrpc.com/authn v0.1.0 h1:m5weACjLWwgwcjttvUDyTPICJKw74+p2obBVrf8hT9E= connectrpc.com/authn v0.1.0/go.mod h1:AwNZK/KYbqaJzRYadTuAaoz6sYQSPdORPqh1TOPIkgY= connectrpc.com/connect v1.16.2 h1:ybd6y+ls7GOlb7Bh5C8+ghA6SvCBajHwxssO2CGFjqE= connectrpc.com/connect v1.16.2/go.mod h1:n2kgwskMHXC+lVqb18wngEpF95ldBHXjZYJussz5FRc= +connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= connectrpc.com/cors v0.1.0 h1:f3gTXJyDZPrDIZCQ567jxfD9PAIpopHiRDnJRt3QuOQ= connectrpc.com/cors v0.1.0/go.mod h1:v8SJZCPfHtGH1zsm+Ttajpozd4cYIUryl4dFB6QEpfg= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= @@ -23,12 +24,14 @@ github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4 github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960/go.mod 
h1:9HQzr9D/0PGwMEbC3d5AB7oi67+h4TsQqItC1GVYG58= github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 h1:PRxIJD8XjimM5aTknUK9w6DHLDox2r2M3DI4i2pnd3w= github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936/go.mod h1:ttYvX5qlB+mlV1okblJqcSMtR4c52UKxDiX9GRBS8+Q= +github.com/e2b-dev/fsnotify v0.0.0-20241216145137-2fe5d32bcb51/go.mod h1:49MToyZ6q0q2rwa5A77Gdh9p3gqmoID22vEJeAYyNDs= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getkin/kin-openapi v0.127.0 h1:Mghqi3Dhryf3F8vR370nN67pAERW+3a95vomb3MAREY= github.com/getkin/kin-openapi v0.127.0/go.mod h1:OZrfXzUfGrNbsKj+xmFBx6E5c6yH3At/tAKSc2UszXM= +github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= @@ -136,6 +139,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -169,6 +173,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -177,6 +182,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -196,6 +202,7 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/packages/orchestrator/.grype.yaml b/packages/orchestrator/.grype.yaml deleted file mode 100644 index 00c5d3a..0000000 --- a/packages/orchestrator/.grype.yaml +++ /dev/null @@ -1,47 +0,0 @@ -ignore: - - vulnerability: CVE-2023-39325 - package: - name: stdlib - version: go1.21.0 - - vulnerability: CVE-2023-45288 - package: - name: stdlib - version: go1.21.0 - - vulnerability: GHSA-rcjv-mgp8-qvmr - package: - name: go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin - - vulnerability: GHSA-2c4m-59x9-fr2g - package: - name: github.com/gin-gonic/gin - - vulnerability: GHSA-3vp4-m3rf-835h - package: - name: github.com/gin-gonic/gin - - vulnerability: GHSA-hcg3-q754-cr77 - package: - name: golang.org/x/crypto - - vulnerability: GHSA-vvgc-356p-c3xw - package: - name: golang.org/x/net - - vulnerability: GHSA-qxp5-gwg8-xv66 - package: - name: golang.org/x/net - - vulnerability: CVE-2023-39325 - package: - name: stdlib - version: go1.21.0 - - vulnerability: CVE-2023-39232 - package: - name: stdlib - version: go1.21.0 - - vulnerability: CVE-2023-45283 - package: - name: stdlib - version: go1.21.0 - - vulnerability: CVE-2023-44487 - package: - name: stdlib - version: go1.21.0 - - vulnerability: CVE-2023-45288 - package: - name: stdlib - version: go1.21.0 diff --git a/packages/orchestrator/Dockerfile b/packages/orchestrator/Dockerfile index a4b54e2..902d3bd 100644 --- a/packages/orchestrator/Dockerfile +++ b/packages/orchestrator/Dockerfile @@ -1,42 +1,23 @@ FROM golang:1.24 AS builder -# A simpler approach - use a temporary directory structure to build -WORKDIR /app +WORKDIR /build/shared -# Setup shared module first -WORKDIR /app/shared COPY 
.shared/go.mod .shared/go.sum ./ -COPY .shared/pkg ./pkg +RUN go mod download -# Add AWS dependencies -RUN go get github.com/aws/aws-sdk-go-v2@latest -RUN go get github.com/aws/aws-sdk-go-v2/config@latest -RUN go get github.com/aws/aws-sdk-go-v2/service/s3@latest -RUN go get github.com/cenkalti/backoff/v4@latest -RUN go mod tidy +COPY .shared/pkg pkg -# Setup orchestrator module next -WORKDIR /app/orchestrator -COPY go.mod go.sum ./ +WORKDIR /build/orchestrator -# Now create the go.work file at root level -WORKDIR /app -RUN echo 'go 1.23.0\n\nuse (\n\t./shared\n\t./orchestrator\n)' > go.work -RUN go work sync +COPY go.mod go.sum ./ +RUN go mod download -# Return to orchestrator module to finish the build -WORKDIR /app/orchestrator COPY main.go Makefile ./ -COPY internal/ ./internal/ - -# Download dependencies before building -RUN go mod download -RUN go mod tidy +COPY internal internal ARG COMMIT_SHA -# Build directly instead of using make to avoid issues -RUN --mount=type=cache,target=/root/.cache/go-build CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/orchestrator -ldflags "-X=main.commitSHA=${COMMIT_SHA}" . +RUN --mount=type=cache,target=/root/.cache/go-build make build-local COMMIT_SHA=${COMMIT_SHA} FROM scratch -COPY --from=builder /app/orchestrator/bin/orchestrator . +COPY --from=builder /build/orchestrator/bin/orchestrator . diff --git a/packages/orchestrator/Makefile b/packages/orchestrator/Makefile index cb111ff..4a0fc97 100644 --- a/packages/orchestrator/Makefile +++ b/packages/orchestrator/Makefile @@ -33,9 +33,18 @@ build-local: build-debug: CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -race -gcflags=all="-N -l" -o bin/orchestrator . 
-.PHONY: upload-gcp -upload-gcp: - ./upload.sh $(GCP_PROJECT_ID) +.PHONY: run-debug +run-debug: + make build-debug + sudo -E GOTRACEBACK=crash \ + GODEBUG=madvdontneed=1 \ + NODE_ID="testclient" \ + TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) \ + ENVD_TIMEOUT=$(ENVD_TIMEOUT) \ + ORCHESTRATOR_SERVICES=$(ORCHESTRATOR_SERVICES) \ + GCP_DOCKER_REPOSITORY_NAME=$(GCP_DOCKER_REPOSITORY_NAME) \ + GOOGLE_SERVICE_ACCOUNT_BASE64=$(GOOGLE_SERVICE_ACCOUNT_BASE64) \ + ./bin/orchestrator .PHONY: upload-aws upload-aws: @@ -76,57 +85,35 @@ build-and-upload: .PHONY: mock mock: - sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) CONSUL_TOKEN=$(CONSUL_TOKEN) NODE_ID="test-client" go run cmd/mock-sandbox/mock.go -template 5wzg6c91u51yaebviysf -build "f0370054-b669-eeee-b33b-573d5287c6ef" -alive 1 -count 2 - -.PHONY: mock-aws -mock-aws: - sudo AWS_ENABLED=true \ - TEMPLATE_AWS_BUCKET_NAME=$(TEMPLATE_AWS_BUCKET_NAME) \ - AWS_REGION=$(AWS_REGION) \ - AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) \ - AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) \ - CONSUL_TOKEN=$(CONSUL_TOKEN) \ - TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) \ - NODE_ID="test-client-aws" \ - go run cmd/mock-sandbox/mock.go -template $(TEMPLATE_ID) -build $(BUILD_ID) -alive 1 -count 2 + sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) CONSUL_TOKEN=$(CONSUL_TOKEN) NODE_ID="testclient" go run cmd/mock-sandbox/mock.go -template 5wzg6c91u51yaebviysf -build "f0370054-b669-eeee-b33b-573d5287c6ef" -alive 1 -count 2 .PHONY: mock-nbd mock-nbd: sudo go run -gcflags=all="-N -l" cmd/mock-nbd/mock.go -.PHONY: killall -killall: - gcloud compute instance-groups list-instances $(PREFIX)orch-client-ig \ - --zone=$(GCP_ZONE) \ - --project=$(GCP_PROJECT_ID) \ - --format="value(instance)" \ - --quiet | xargs -I {} -P 5 sh -c "gcloud compute ssh {} --project=$(GCP_PROJECT_ID) --zone=$(GCP_ZONE) --command='sudo killall -9 firecracker'" - @echo "Killing all firecracker processes" - -.PHONY: kill-old -kill-old: - gcloud compute instance-groups 
list-instances $(PREFIX)orch-client-ig \ - --zone=$(GCP_ZONE) \ - --project=$(GCP_PROJECT_ID) \ - --format="value(instance)" \ - --quiet | xargs -I {} -P 5 sh -c "gcloud compute ssh {} --project=$(GCP_PROJECT_ID) --zone=$(GCP_ZONE) --command='sudo killall -9 --older-than 24h firecracker'" - @echo "Killing all firecracker processes" - .PHONY: mock-snapshot mock-snapshot: - sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) CONSUL_TOKEN=$(CONSUL_TOKEN) NODE_ID="test-client" go run cmd/mock-snapshot/mock.go -template 5wzg6c91u51yaebviysf -build "f0370054-b669-4d7e-b33b-573d5287c6ef" -alive 1 -count 1 - -.PHONY: mock-snapshot-aws -mock-snapshot-aws: - sudo AWS_ENABLED=true \ - TEMPLATE_AWS_BUCKET_NAME=$(TEMPLATE_AWS_BUCKET_NAME) \ - AWS_REGION=$(AWS_REGION) \ - AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) \ - AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) \ - CONSUL_TOKEN=$(CONSUL_TOKEN) \ - NODE_ID="test-client-aws" \ - go run cmd/mock-snapshot/mock.go -template $(TEMPLATE_ID) -build $(BUILD_ID) -alive 1 -count 1 + sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) CONSUL_TOKEN=$(CONSUL_TOKEN) NODE_ID="testclient" go run cmd/mock-snapshot/mock.go -template 5wzg6c91u51yaebviysf -build "f0370054-b669-4d7e-b33b-573d5287c6ef" -alive 1 -count 1 .PHONY: test test: go test -v ./... 
+ +.PHONY: build-template +build-template: + sudo -E TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) \ + GOOGLE_SERVICE_ACCOUNT_BASE64=$(GOOGLE_SERVICE_ACCOUNT_BASE64) \ + DOCKER_AUTH_BASE64=$(DOCKER_AUTH_BASE64) \ + GCP_PROJECT_ID=$(GCP_PROJECT_ID) \ + GCP_DOCKER_REPOSITORY_NAME=$(GCP_DOCKER_REPOSITORY_NAME) \ + GCP_REGION=$(GCP_REGION) \ + ENVIRONMENT=local \ + go run cmd/build-template/main.go \ + -template $(TEMPLATE_ID) \ + -build $(BUILD_ID) \ + -kernel $(KERNEL_VERSION) \ + -firecracker $(FIRECRACKER_VERSION) + +.PHONY: migrate +migrate: + ./upload-envs.sh /mnt/disks/fc-envs/v1 $(TEMPLATE_BUCKET_NAME) \ No newline at end of file diff --git a/packages/orchestrator/README.md b/packages/orchestrator/README.md index 4aba68c..d4f9024 100644 --- a/packages/orchestrator/README.md +++ b/packages/orchestrator/README.md @@ -1,81 +1 @@ -# Orchestrator - -## Storage Providers - -The orchestrator supports both Google Cloud Storage (GCS) and AWS S3 as storage backends. - -### Storage Configuration - -#### AWS S3 Configuration - -To use AWS S3 as your storage provider: - -1. Set environment variables: - - `AWS_ENABLED=true` - Enables AWS S3 as the storage provider - - `TEMPLATE_AWS_BUCKET_NAME` - Name of your S3 bucket for template storage - - `AWS_REGION` - AWS region where your S3 bucket is located (defaults to us-east-1) - - `AWS_ACCESS_KEY_ID` - Your AWS access key - - `AWS_SECRET_ACCESS_KEY` - Your AWS secret key - -2. Infrastructure preparation: - - Create an S3 bucket in your AWS account - - Ensure the IAM user associated with your credentials has appropriate permissions: - ```json - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:GetObject", - "s3:PutObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::YOUR_BUCKET_NAME", - "arn:aws:s3:::YOUR_BUCKET_NAME/*" - ] - } - ] - } - ``` - -3. 
To build and upload the orchestrator to AWS: - ``` - make build-and-upload - ``` - -#### Google Cloud Storage Configuration (default) - -To use Google Cloud Storage: -- Ensure `AWS_ENABLED` is not set or set to any value other than "true" -- Set `TEMPLATE_BUCKET_NAME` to your GCS bucket name -- Configure standard GCP credentials - -## Development - -### Building for AWS - -```bash -# Build the Docker image and push to AWS ECR -make upload-aws - -# Build local binary -make build-local -``` - -### Testing AWS S3 Integration - -To test the AWS S3 integration: - -1. Set the required environment variables -2. Run the mock sandbox with AWS configuration: - ```bash - AWS_ENABLED=true \ - TEMPLATE_AWS_BUCKET_NAME=your-s3-bucket \ - AWS_REGION=us-east-1 \ - AWS_ACCESS_KEY_ID=your-access-key \ - AWS_SECRET_ACCESS_KEY=your-secret-key \ - sudo go run cmd/mock-sandbox/mock.go -template your-template-id -build your-build-id -alive 1 -count 1 - ``` \ No newline at end of file +# Orchestrator \ No newline at end of file diff --git a/packages/orchestrator/cmd/build-template/main.go b/packages/orchestrator/cmd/build-template/main.go new file mode 100644 index 0000000..dfab5ce --- /dev/null +++ b/packages/orchestrator/cmd/build-template/main.go @@ -0,0 +1,167 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "net/http" + "os" + "time" + + "github.com/rs/zerolog/log" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric/noop" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/proxy" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + 
"github.com/e2b-dev/infra/packages/orchestrator/internal/template/template" + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" + l "github.com/e2b-dev/infra/packages/shared/pkg/logger" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" +) + +const proxyPort = 5007 + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + templateID := flag.String("template", "", "template id") + buildID := flag.String("build", "", "build id") + kernelVersion := flag.String("kernel", "", "kernel version") + fcVersion := flag.String("firecracker", "", "firecracker version") + flag.Parse() + + err := buildTemplate(ctx, *kernelVersion, *fcVersion, *templateID, *buildID) + if err != nil { + log.Fatal().Err(err).Msg("error building template") + os.Exit(1) + } +} + +func buildTemplate(parentCtx context.Context, kernelVersion, fcVersion, templateID, buildID string) error { + ctx, cancel := context.WithTimeout(parentCtx, time.Minute*5) + defer cancel() + + clientID := "build-template-cmd" + logger, err := l.NewLogger(ctx, l.LoggerConfig{ + ServiceName: clientID, + IsInternal: true, + IsDebug: true, + EnableConsole: true, + }) + if err != nil { + return fmt.Errorf("could not create logger: %w", err) + } + zap.ReplaceGlobals(logger) + sbxlogger.SetSandboxLoggerExternal(logger) + sbxlogger.SetSandboxLoggerInternal(logger) + + tracer := otel.Tracer("test") + + logger.Info("building template", l.WithTemplateID(templateID), l.WithBuildID(buildID)) + + // The sandbox map is shared between the server and the proxy + // to propagate information about sandbox routing. 
+ sandboxes := smap.New[*sandbox.Sandbox]() + + sandboxProxy, err := proxy.NewSandboxProxy(noop.MeterProvider{}, proxyPort, sandboxes) + if err != nil { + logger.Fatal("failed to create sandbox proxy", zap.Error(err)) + } + go func() { + err := sandboxProxy.Start() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + logger.Error("failed to start sandbox proxy", zap.Error(err)) + } + }() + defer func() { + err := sandboxProxy.Close(parentCtx) + if err != nil { + logger.Error("error closing sandbox proxy", zap.Error(err)) + } + }() + + persistence, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + return fmt.Errorf("could not create storage provider: %w", err) + } + + devicePool, err := nbd.NewDevicePool(ctx, noop.MeterProvider{}) + if err != nil { + return fmt.Errorf("could not create device pool: %w", err) + } + defer func() { + err := devicePool.Close(parentCtx) + if err != nil { + logger.Error("error closing device pool", zap.Error(err)) + } + }() + + networkPool, err := network.NewPool(ctx, noop.MeterProvider{}, 8, 8, clientID, tracer) + if err != nil { + return fmt.Errorf("could not create network pool: %w", err) + } + defer func() { + err := networkPool.Close(parentCtx) + if err != nil { + logger.Error("error closing network pool", zap.Error(err)) + } + }() + + artifactRegistry, err := artifactsregistry.GetArtifactsRegistryProvider() + if err != nil { + return fmt.Errorf("error getting artifacts registry provider: %v", err) + } + + templateStorage := template.NewStorage(persistence) + builder := build.NewBuilder( + logger, + logger, + tracer, + templateStorage, + persistence, + artifactRegistry, + devicePool, + networkPool, + sandboxProxy, + sandboxes, + ) + + logsWriter := writer.New( + logger. + With(zap.Field{Type: zapcore.StringType, Key: "envID", String: templateID}). 
+ With(zap.Field{Type: zapcore.StringType, Key: "buildID", String: buildID}), + ) + config := &build.TemplateConfig{ + TemplateFiles: storage.NewTemplateFiles( + templateID, + buildID, + kernelVersion, + fcVersion, + ), + VCpuCount: 2, + MemoryMB: 1024, + StartCmd: "echo 'start cmd debug' && sleep 10 && echo 'done starting command debug'", + DiskSizeMB: 1024, + BuildLogsWriter: logsWriter, + HugePages: true, + } + + _, err = builder.Build(ctx, config) + if err != nil { + return fmt.Errorf("error building template: %w", err) + } + + fmt.Println("Build finished, closing...") + return nil +} diff --git a/packages/orchestrator/cmd/inspect-data/main.go b/packages/orchestrator/cmd/inspect-data/main.go index 5bd19ea..4eaf0dc 100644 --- a/packages/orchestrator/cmd/inspect-data/main.go +++ b/packages/orchestrator/cmd/inspect-data/main.go @@ -8,7 +8,6 @@ import ( "log" "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" ) func main() { @@ -24,19 +23,19 @@ func main() { *buildId, "", "", - false, ) var storagePath string var blockSize int64 - if *kind == "memfile" { + switch *kind { + case "memfile": storagePath = template.StorageMemfilePath() blockSize = 2097152 - } else if *kind == "rootfs" { + case "rootfs": storagePath = template.StorageRootfsPath() blockSize = 4096 - } else { + default: log.Fatalf("invalid kind: %s", *kind) } @@ -46,7 +45,15 @@ func main() { ctx := context.Background() - obj := s3.NewObject(ctx, s3.GetTemplateBucket(), storagePath) + storage, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + log.Fatalf("failed to get storage provider: %s", err) + } + + obj, err := storage.OpenObject(ctx, storagePath) + if err != nil { + log.Fatalf("failed to open object: %s", err) + } size, err := obj.Size() if err != nil { @@ -67,7 +74,7 @@ func main() { fmt.Printf("\nMETADATA\n") fmt.Printf("========\n") - fmt.Printf("Storage path %s/%s\n", s3.GetTemplateBucket().Name, storagePath) + 
fmt.Printf("Storage %s/%s\n", storage.GetDetails(), storagePath) fmt.Printf("Build ID %s\n", *buildId) fmt.Printf("Size %d B (%d MiB)\n", size, size/1024/1024) fmt.Printf("Block size %d B\n", blockSize) diff --git a/packages/orchestrator/cmd/inspect-header/main.go b/packages/orchestrator/cmd/inspect-header/main.go index f066eaf..c9743dd 100644 --- a/packages/orchestrator/cmd/inspect-header/main.go +++ b/packages/orchestrator/cmd/inspect-header/main.go @@ -9,7 +9,6 @@ import ( "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" ) func main() { @@ -23,22 +22,29 @@ func main() { *buildId, "", "", - false, ) var storagePath string - if *kind == "memfile" { + switch *kind { + case "memfile": storagePath = template.StorageMemfileHeaderPath() - } else if *kind == "rootfs" { + case "rootfs": storagePath = template.StorageRootfsHeaderPath() - } else { + default: log.Fatalf("invalid kind: %s", *kind) } ctx := context.Background() + storage, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + log.Fatalf("failed to get storage provider: %s", err) + } - obj := s3.NewObject(ctx, s3.GetTemplateBucket(), storagePath) + obj, err := storage.OpenObject(ctx, storagePath) + if err != nil { + log.Fatalf("failed to open object: %s", err) + } h, err := header.Deserialize(obj) if err != nil { @@ -47,7 +53,7 @@ func main() { fmt.Printf("\nMETADATA\n") fmt.Printf("========\n") - fmt.Printf("Storage path %s/%s\n", s3.GetTemplateBucket().Name, storagePath) + fmt.Printf("Storage %s/%s\n", storage.GetDetails(), storagePath) fmt.Printf("Version %d\n", h.Metadata.Version) fmt.Printf("Generation %d\n", h.Metadata.Generation) fmt.Printf("Build ID %s\n", h.Metadata.BuildId) diff --git a/packages/orchestrator/cmd/mock-nbd/mock.go b/packages/orchestrator/cmd/mock-nbd/mock.go index f7df41f..c810004 100644 --- a/packages/orchestrator/cmd/mock-nbd/mock.go +++ 
b/packages/orchestrator/cmd/mock-nbd/mock.go @@ -8,9 +8,13 @@ import ( "os" "os/signal" + "github.com/google/uuid" "github.com/pojntfx/go-nbd/pkg/backend" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric/noop" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" ) const blockSize = 4096 @@ -34,6 +38,23 @@ func (d *DeviceWithClose) Slice(offset, length int64) ([]byte, error) { return b, nil } +func (d *DeviceWithClose) BlockSize() int64 { + return blockSize +} + +func (d *DeviceWithClose) Header() *header.Header { + size, err := d.Backend.Size() + if err != nil { + panic(err) + } + + return header.NewHeader(header.NewTemplateMetadata( + uuid.New(), + uint64(blockSize), + uint64(size), + ), nil) +} + func main() { data := make([]byte, blockSize*8) rand.Read(data) @@ -47,6 +68,12 @@ func main() { done := make(chan os.Signal, 1) signal.Notify(done, os.Interrupt) + devicePool, err := nbd.NewDevicePool(ctx, noop.MeterProvider{}) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create device pool: %v\n", err) + + return + } go func() { <-done @@ -63,7 +90,7 @@ func main() { fmt.Printf("----------------------------------------\n") fmt.Printf("[%d] starting mock nbd server\n", i) - readData, err := MockNbd(ctx, device, i) + readData, err := MockNbd(ctx, device, i, devicePool) if err != nil { fmt.Fprintf(os.Stderr, "[%d] failed to mock nbd: %v\n", i, err) @@ -78,7 +105,7 @@ func main() { } } -func MockNbd(ctx context.Context, device *DeviceWithClose, index int) ([]byte, error) { +func MockNbd(ctx context.Context, device *DeviceWithClose, index int, devicePool *nbd.DevicePool) ([]byte, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -87,7 +114,7 @@ func MockNbd(ctx context.Context, device *DeviceWithClose, index int) ([]byte, e return nil, fmt.Errorf("failed to get size: %w", err) } - deviceIndex, err := nbd.Pool.GetDevice(ctx) + deviceIndex, err := 
devicePool.GetDevice(ctx) if err != nil { return nil, fmt.Errorf("failed to get device: %w", err) } @@ -99,14 +126,14 @@ func MockNbd(ctx context.Context, device *DeviceWithClose, index int) ([]byte, e for { counter += 1 - err = nbd.Pool.ReleaseDevice(deviceIndex) + err = devicePool.ReleaseDevice(deviceIndex) if err != nil { if counter%10 == 0 { fmt.Printf("[%d] failed to release device: %v\n", index, err) } if mnt != nil { - mnt.Close() + mnt.Close(ctx) } continue @@ -118,12 +145,13 @@ func MockNbd(ctx context.Context, device *DeviceWithClose, index int) ([]byte, e } }() - mnt = nbd.NewDirectPathMount(device) + tracer := otel.Tracer("test") + mnt = nbd.NewDirectPathMount(tracer, device, devicePool) go func() { <-ctx.Done() - mnt.Close() + mnt.Close(context.TODO()) }() _, err = mnt.Open(ctx) diff --git a/packages/orchestrator/cmd/mock-sandbox/mock.go b/packages/orchestrator/cmd/mock-sandbox/mock.go deleted file mode 100644 index be701d9..0000000 --- a/packages/orchestrator/cmd/mock-sandbox/mock.go +++ /dev/null @@ -1,162 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "time" - - "go.opentelemetry.io/otel" - - "github.com/e2b-dev/infra/packages/orchestrator/internal/dns" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" - "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" - "github.com/e2b-dev/infra/packages/shared/pkg/logs" -) - -func main() { - templateId := flag.String("template", "", "template id") - buildId := flag.String("build", "", "build id") - sandboxId := flag.String("sandbox", "", "sandbox id") - keepAlive := flag.Int("alive", 0, "keep alive") - count := flag.Int("count", 1, "number of serially spawned sandboxes") - - flag.Parse() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - done 
:= make(chan os.Signal, 1) - signal.Notify(done, os.Interrupt) - - go func() { - <-done - - cancel() - }() - - dnsServer := dns.New() - go func() { - log.Printf("Starting DNS server") - - err := dnsServer.Start("127.0.0.4", 53) - if err != nil { - log.Fatalf("Failed running DNS server: %s\n", err.Error()) - } - }() - - templateCache, err := template.NewCache(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create template cache: %v\n", err) - - return - } - - networkPool, err := network.NewPool(ctx, *count, 0) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create network pool: %v\n", err) - - return - } - defer networkPool.Close() - - for i := 0; i < *count; i++ { - fmt.Println("--------------------------------") - fmt.Printf("Starting sandbox %d\n", i) - - v := i - - err = mockSandbox( - ctx, - *templateId, - *buildId, - *sandboxId+"-"+strconv.Itoa(v), - dnsServer, - time.Duration(*keepAlive)*time.Second, - networkPool, - templateCache, - ) - if err != nil { - break - } - } -} - -func mockSandbox( - ctx context.Context, - templateId, - buildId, - sandboxId string, - dns *dns.DNS, - keepAlive time.Duration, - networkPool *network.Pool, - templateCache *template.Cache, -) error { - tracer := otel.Tracer(fmt.Sprintf("sandbox-%s", sandboxId)) - childCtx, _ := tracer.Start(ctx, "mock-sandbox") - - start := time.Now() - logger := logs.NewSandboxLogger(sandboxId, templateId, "test-team", 2, 512, false) - - sbx, cleanup, err := sandbox.NewSandbox( - childCtx, - tracer, - dns, - networkPool, - templateCache, - &orchestrator.SandboxConfig{ - TemplateId: templateId, - // FirecrackerVersion: "v1.10.1_1fcdaec", - // KernelVersion: "vmlinux-6.1.102", - FirecrackerVersion: "v1.7.0-dev_8bb88311", - KernelVersion: "vmlinux-5.10.186", - TeamId: "test-team", - BuildId: buildId, - HugePages: true, - MaxSandboxLength: 1, - SandboxId: sandboxId, - EnvdVersion: "0.1.1", - RamMb: 512, - Vcpu: 2, - }, - "trace-test-1", - time.Now(), - time.Now(), - logger, - true, - 
templateId, - ) - defer func() { - cleanupErr := cleanup.Run() - if cleanupErr != nil { - fmt.Fprintf(os.Stderr, "failed to cleanup sandbox: %v\n", cleanupErr) - } - }() - - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create sandbox: %v\n", err) - - return err - } - - duration := time.Since(start) - - fmt.Printf("[Sandbox is running] - started in %dms \n", duration.Milliseconds()) - - time.Sleep(keepAlive) - - err = sbx.Stop() - if err != nil { - fmt.Fprintf(os.Stderr, "failed to stop sandbox: %v\n", err) - - return err - } - - return nil -} diff --git a/packages/orchestrator/cmd/mock-snapshot/mock.go b/packages/orchestrator/cmd/mock-snapshot/mock.go deleted file mode 100644 index 45aadec..0000000 --- a/packages/orchestrator/cmd/mock-snapshot/mock.go +++ /dev/null @@ -1,285 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "os/signal" - "strconv" - "time" - - "go.opentelemetry.io/otel" - "golang.org/x/sync/errgroup" - - "github.com/e2b-dev/infra/packages/orchestrator/internal/dns" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" - "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" - "github.com/e2b-dev/infra/packages/shared/pkg/logs" - "github.com/e2b-dev/infra/packages/shared/pkg/storage" -) - -func main() { - templateId := flag.String("template", "", "template id") - buildId := flag.String("build", "", "build id") - sandboxId := flag.String("sandbox", "", "sandbox id") - keepAlive := flag.Int("alive", 0, "keep alive") - count := flag.Int("count", 1, "number of serially spawned sandboxes") - - flag.Parse() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - done := make(chan os.Signal, 1) - signal.Notify(done, os.Interrupt) - - go func() { - <-done - - cancel() - }() - - dnsServer := dns.New() - go func() { - 
log.Printf("Starting DNS server") - - err := dnsServer.Start("127.0.0.4", 53) - if err != nil { - log.Fatalf("Failed running DNS server: %s\n", err.Error()) - } - }() - - templateCache, err := template.NewCache(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create template cache: %v\n", err) - - return - } - - networkPool, err := network.NewPool(ctx, *count, 0) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create network pool: %v\n", err) - - return - } - defer networkPool.Close() - - eg, ctx := errgroup.WithContext(ctx) - - for i := 0; i < *count; i++ { - fmt.Println("--------------------------------") - fmt.Printf("Starting sandbox %d\n", i) - - v := i - - err = mockSnapshot( - ctx, - *templateId, - *buildId, - *sandboxId+"-"+strconv.Itoa(v), - dnsServer, - time.Duration(*keepAlive)*time.Second, - networkPool, - templateCache, - ) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to start sandbox: %v\n", err) - return - } - } - - err = eg.Wait() - if err != nil { - fmt.Fprintf(os.Stderr, "failed to start sandboxes: %v\n", err) - } -} - -func mockSnapshot( - ctx context.Context, - templateId, - buildId, - sandboxId string, - dns *dns.DNS, - keepAlive time.Duration, - networkPool *network.Pool, - templateCache *template.Cache, -) error { - tracer := otel.Tracer(fmt.Sprintf("sandbox-%s", sandboxId)) - childCtx, _ := tracer.Start(ctx, "mock-sandbox") - - logger := logs.NewSandboxLogger(sandboxId, templateId, "test-team", 2, 512, false) - - start := time.Now() - - sbx, cleanup, err := sandbox.NewSandbox( - childCtx, - tracer, - dns, - networkPool, - templateCache, - &orchestrator.SandboxConfig{ - TemplateId: templateId, - FirecrackerVersion: "v1.7.0-dev_8bb88311", - KernelVersion: "vmlinux-5.10.186", - TeamId: "test-team", - BuildId: buildId, - HugePages: true, - MaxSandboxLength: 1, - SandboxId: sandboxId, - EnvdVersion: "0.1.1", - RamMb: 512, - Vcpu: 2, - }, - "trace-test-1", - time.Now(), - time.Now(), - logger, - false, - templateId, - ) - 
defer func() { - cleanupErr := cleanup.Run() - if cleanupErr != nil { - fmt.Fprintf(os.Stderr, "failed to cleanup sandbox: %v\n", cleanupErr) - } - }() - - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create sandbox: %v\n", err) - - return err - } - - duration := time.Since(start) - - fmt.Printf("[Sandbox is running] - started in %dms \n", duration.Milliseconds()) - - time.Sleep(keepAlive) - - fmt.Println("Snapshotting sandbox") - - snapshotTime := time.Now() - - snapshotTemplateFiles, err := storage.NewTemplateFiles( - "snapshot-template", - "f0370054-b669-eee4-b33b-573d5287c6ef", - sbx.Config.KernelVersion, - sbx.Config.FirecrackerVersion, - sbx.Config.HugePages, - ).NewTemplateCacheFiles() - if err != nil { - return fmt.Errorf("failed to create snapshot template files: %w", err) - } - - err = os.MkdirAll(snapshotTemplateFiles.CacheDir(), 0o755) - if err != nil { - return fmt.Errorf("failed to create snapshot template files directory: %w", err) - } - - defer func() { - err := os.RemoveAll(snapshotTemplateFiles.CacheDir()) - if err != nil { - fmt.Fprintf(os.Stderr, "error removing sandbox cache dir '%s': %v\n", snapshotTemplateFiles.CacheDir(), err) - } - }() - - fmt.Println("Snapshotting sandbox") - - snapshot, err := sbx.Snapshot(ctx, otel.Tracer("orchestrator-mock"), snapshotTemplateFiles, func() {}) - if err != nil { - return fmt.Errorf("failed to snapshot sandbox: %w", err) - } - - fmt.Println("Create snapshot time: ", time.Since(snapshotTime).Milliseconds()) - - err = templateCache.AddSnapshot( - snapshotTemplateFiles.TemplateId, - snapshotTemplateFiles.BuildId, - snapshotTemplateFiles.KernelVersion, - snapshotTemplateFiles.FirecrackerVersion, - snapshotTemplateFiles.Hugepages(), - snapshot.MemfileDiffHeader, - snapshot.RootfsDiffHeader, - snapshot.Snapfile, - snapshot.MemfileDiff, - snapshot.RootfsDiff, - ) - if err != nil { - return fmt.Errorf("failed to add snapshot to template cache: %w", err) - } - - fmt.Println("Add snapshot to template cache 
time: ", time.Since(snapshotTime).Milliseconds()) - - start = time.Now() - - sbx, cleanup2, err := sandbox.NewSandbox( - childCtx, - tracer, - dns, - networkPool, - templateCache, - &orchestrator.SandboxConfig{ - TemplateId: snapshotTemplateFiles.TemplateId, - FirecrackerVersion: snapshotTemplateFiles.FirecrackerVersion, - KernelVersion: snapshotTemplateFiles.KernelVersion, - TeamId: "test-team", - BuildId: snapshotTemplateFiles.BuildId, - HugePages: snapshotTemplateFiles.Hugepages(), - MaxSandboxLength: 1, - SandboxId: sandboxId, - EnvdVersion: "0.1.1", - RamMb: 512, - Vcpu: 2, - }, - "trace-test-1", - time.Now(), - time.Now(), - logger, - false, - templateId, - ) - defer func() { - cleanupErr := cleanup2.Run() - if cleanupErr != nil { - fmt.Fprintf(os.Stderr, "failed to cleanup sandbox: %v\n", cleanupErr) - } - }() - - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create sandbox: %v\n", err) - - return err - } - - duration = time.Since(start) - - fmt.Printf("[Resumed sandbox is running] - started in %dms \n", duration.Milliseconds()) - - time.Sleep(keepAlive) - - // b := storage.NewTemplateBuild( - // snapshot.MemfileDiffHeader, - // snapshot.RootfsDiffHeader, - // snapshotTemplateFiles.TemplateFiles, - // ) - - // err = <-b.Upload( - // ctx, - // snapshotTemplateFiles.CacheSnapfilePath(), - // snapshotTemplateFiles.CacheMemfilePath(), - // snapshotTemplateFiles.CacheRootfsPath(), - // ) - // if err != nil { - // return fmt.Errorf("failed to upload snapshot template files: %w", err) - // } - - fmt.Println("Upload snapshot time: ", time.Since(snapshotTime).Milliseconds()) - - duration = time.Since(snapshotTime) - - return nil -} diff --git a/packages/orchestrator/cmd/simulate-headers-merge/main.go b/packages/orchestrator/cmd/simulate-headers-merge/main.go index ec4199b..913470a 100644 --- a/packages/orchestrator/cmd/simulate-headers-merge/main.go +++ b/packages/orchestrator/cmd/simulate-headers-merge/main.go @@ -7,10 +7,10 @@ import ( "log" "os" + 
"github.com/google/uuid" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" - "github.com/google/uuid" ) func main() { @@ -26,7 +26,6 @@ func main() { *baseBuildId, "", "", - false, ) diffTemplate := storage.NewTemplateFiles( @@ -34,26 +33,38 @@ func main() { *diffBuildId, "", "", - false, ) var baseStoragePath string var diffStoragePath string - if *kind == "memfile" { + switch *kind { + case "memfile": baseStoragePath = baseTemplate.StorageMemfileHeaderPath() diffStoragePath = diffTemplate.StorageMemfileHeaderPath() - } else if *kind == "rootfs" { + case "rootfs": baseStoragePath = baseTemplate.StorageRootfsHeaderPath() diffStoragePath = diffTemplate.StorageRootfsHeaderPath() - } else { + default: log.Fatalf("invalid kind: %s", *kind) } ctx := context.Background() - baseObj := s3.NewObject(ctx, s3.GetTemplateBucket(), baseStoragePath) - diffObj := s3.NewObject(ctx, s3.GetTemplateBucket(), diffStoragePath) + storage, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + log.Fatalf("failed to get storage provider: %s", err) + } + + baseObj, err := storage.OpenObject(ctx, baseStoragePath) + if err != nil { + log.Fatalf("failed to open object: %s", err) + } + + diffObj, err := storage.OpenObject(ctx, diffStoragePath) + if err != nil { + log.Fatalf("failed to open object: %s", err) + } baseHeader, err := header.Deserialize(baseObj) if err != nil { @@ -66,7 +77,7 @@ func main() { } fmt.Printf("\nBASE METADATA\n") - fmt.Printf("Storage path %s/%s\n", s3.GetTemplateBucket().Name, baseStoragePath) + fmt.Printf("Storage path %s/%s\n", storage.GetDetails(), baseStoragePath) fmt.Printf("========\n") for _, mapping := range baseHeader.Mapping { @@ -97,7 +108,7 @@ func main() { } fmt.Printf("\nDIFF METADATA\n") - fmt.Printf("Storage path %s/%s\n", s3.GetTemplateBucket().Name, diffStoragePath) + fmt.Printf("Storage path %s/%s\n", 
storage.GetDetails(), diffStoragePath) fmt.Printf("========\n") onlyDiffMappings := make([]*header.BuildMap, 0) diff --git a/packages/orchestrator/go.mod b/packages/orchestrator/go.mod index 0dee620..9d7d868 100644 --- a/packages/orchestrator/go.mod +++ b/packages/orchestrator/go.mod @@ -1,65 +1,110 @@ module github.com/e2b-dev/infra/packages/orchestrator -go 1.24 +go 1.24.3 require ( + connectrpc.com/connect v1.18.1 github.com/Merovius/nbd v0.0.0-20240812113926-fd65a54c9949 - github.com/bits-and-blooms/bitset v1.17.0 + github.com/bits-and-blooms/bitset v1.22.0 + github.com/containernetworking/plugins v1.6.0 + github.com/containers/storage v1.58.0 github.com/coreos/go-iptables v0.8.0 + github.com/dustin/go-humanize v1.0.1 github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/edsrzf/mmap-go v1.2.0 github.com/firecracker-microvm/firecracker-go-sdk v1.0.0 github.com/go-openapi/strfmt v0.23.0 + github.com/google/go-containerregistry v0.20.5 + github.com/google/nftables v0.3.0 github.com/google/uuid v1.6.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 - github.com/hashicorp/consul/api v1.31.2 - github.com/jellydator/ttlcache/v3 v3.3.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 + github.com/hashicorp/consul/api v1.30.0 + github.com/jellydator/ttlcache/v3 v3.3.1-0.20250207140243-aefc35918359 + github.com/launchdarkly/go-sdk-common/v3 v3.1.0 github.com/loopholelabs/userfaultfd-go v0.1.2 - github.com/miekg/dns v1.1.64 + github.com/ngrok/firewall_toolkit v0.0.18 + github.com/pkg/errors v0.9.1 github.com/pojntfx/go-nbd v0.3.2 - github.com/vishvananda/netlink v1.3.0 + github.com/rs/zerolog v1.34.0 + github.com/soheilhy/cmux v0.1.5 + github.com/stretchr/testify v1.10.0 + github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 github.com/vishvananda/netns v0.0.5 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 - go.opentelemetry.io/otel v1.32.0 - 
go.opentelemetry.io/otel/metric v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 - go.uber.org/zap v1.18.1 - golang.org/x/mod v0.23.0 - golang.org/x/sync v0.11.0 - golang.org/x/sys v0.30.0 - google.golang.org/grpc v1.68.0 - google.golang.org/protobuf v1.36.4 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 + go.opentelemetry.io/otel v1.36.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 + go.opentelemetry.io/otel/metric v1.36.0 + go.opentelemetry.io/otel/sdk/metric v1.35.0 + go.opentelemetry.io/otel/trace v1.36.0 + go.uber.org/zap v1.27.0 + golang.org/x/sync v0.14.0 + golang.org/x/sys v0.33.0 + google.golang.org/grpc v1.72.1 + google.golang.org/protobuf v1.36.6 + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 ) require ( + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/artifactregistry v1.16.0 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.2.2 // indirect + cloud.google.com/go/longrunning v0.6.3 // indirect + cloud.google.com/go/monitoring v1.21.2 // indirect + cloud.google.com/go/storage v1.50.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/DataDog/datadog-go/v5 v5.2.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.9 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect + github.com/aws/smithy-go v1.22.3 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cenkalti/backoff/v5 v5.0.2 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // 
indirect + github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect github.com/containerd/fifo v1.1.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/containernetworking/cni v1.2.3 // indirect - github.com/containernetworking/plugins v1.6.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dchest/uniuri v1.2.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v28.1.1+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.1.1+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/gaissmai/extnetip v0.3.3 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.23.0 // indirect @@ -71,51 +116,100 @@ require ( github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect + github.com/google/s2a-go v0.1.8 // indirect + 
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v1.1.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/josharian/native v1.1.0 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/launchdarkly/ccache v1.1.0 // indirect + github.com/launchdarkly/eventsource v1.8.0 // indirect + github.com/launchdarkly/go-jsonstream/v3 v3.1.0 // indirect + github.com/launchdarkly/go-sdk-events/v3 v3.5.0 // indirect + github.com/launchdarkly/go-semver v1.0.3 // indirect + github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1 // indirect + github.com/launchdarkly/go-server-sdk/v7 v7.10.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mdlayher/genetlink v1.3.2 // indirect - github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 // indirect github.com/mdlayher/socket v0.5.1 // indirect + github.com/miekg/dns v1.1.63 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // 
indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/capability v0.4.0 // indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect + github.com/moby/sys/user v0.4.0 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.23.4 // indirect + github.com/onsi/gomega v1.36.3 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect - github.com/rs/zerolog v1.33.0 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/stretchr/objx v0.5.2 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/zeebo/errs v1.4.0 // indirect go.mongodb.org/mongo-driver v1.17.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect - golang.org/x/net v0.35.0 // indirect - golang.org/x/text v0.22.0 // indirect - golang.org/x/tools v0.30.0 // indirect 
- google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.9.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/log v0.10.0 // indirect + go.opentelemetry.io/otel/sdk v1.36.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect + go.opentelemetry.io/proto/otlp v1.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/text v0.25.0 // indirect + golang.org/x/time v0.11.0 // indirect + google.golang.org/api v0.214.0 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace github.com/e2b-dev/infra/packages/shared v0.0.0 => ../shared -replace github.com/mitchellh/osext => golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 +// Fix non existent garyburd/redigo from Microsoft/hcsshim/test v0.0.0 +replace github.com/garyburd/redigo => github.com/gomodule/redigo v1.9.2 + 
+tool ( + google.golang.org/grpc/cmd/protoc-gen-go-grpc + google.golang.org/protobuf/cmd/protoc-gen-go +) diff --git a/packages/orchestrator/go.sum b/packages/orchestrator/go.sum index 5d36ed3..9487495 100644 --- a/packages/orchestrator/go.sum +++ b/packages/orchestrator/go.sum @@ -1,4 +1,6 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -10,20 +12,46 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/artifactregistry v1.16.0 h1:BZpz0x8HCG7hwTkD+GlUwPQVFGOo9w84t8kxQwwc0DA= +cloud.google.com/go/artifactregistry v1.16.0/go.mod h1:LunXo4u2rFtvJjrGjO0JS+Gs9Eco2xbZU6JVJ4+T8Sk= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery 
v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= +cloud.google.com/go/longrunning v0.6.3 h1:A2q2vuyXysRcwzqDpMMLSI6mb6o39miS52UEG/Rd2ng= +cloud.google.com/go/longrunning v0.6.3/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cloud.google.com/go/trace v1.11.2 
h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= +connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw= +connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= @@ -37,6 +65,16 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY= +github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp 
v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0/go.mod h1:6fTWu4m3jocfUZLYF5KsZC1TUfRvEjs7lM4crme/irw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0 h1:jJKWl98inONJAr/IZrdFQUWcwUO95DLY1XMD1ZIut+g= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0/go.mod h1:l2fIqmwB+FKSfvn3bAD/0i+AXAxhIZjTK2svT/mgUXs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 h1:GYUJLfvd++4DMuMhCFLgLXvFwofIxh/qOwoGuS/LTew= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= github.com/Merovius/nbd v0.0.0-20240812113926-fd65a54c9949 h1:udSToqCviS4KPy3kps1QJKUqBjjzxruRr61OnECZL7Q= github.com/Merovius/nbd v0.0.0-20240812113926-fd65a54c9949/go.mod h1:A8CAY38Xm8Bo85Od6knJTdq35LFIGuZjjYhF4o4C1kY= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -47,6 +85,9 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Microsoft/hcsshim v0.8.6/go.mod 
h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -85,14 +126,14 @@ github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38y github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= -github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= -github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8= +github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 h1:MTLivtC3s89de7Fe3P8rzML/8XPNRfuyJhlRTsCEt0k= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66/go.mod h1:NAuQ2s6gaFEsuTIb2+P5t6amB1w5MhvJFxppoezGWH0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74 
h1:+1lc5oMFFHlVBclPXQf/POqlvdpBzjLaN2c3ujDCcZw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74/go.mod h1:EiskBoFr4SpYnFIbw8UM7DP7CacQXDHEmJqLI1xpRFI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= @@ -101,34 +142,34 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0 h1:E+UTVTDH6XTSjqxHWRuY8nB6s+05UllneWxnycplHFk= +github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0/go.mod h1:iQ1skgw1XRK+6Lgkb0I9ODatAP72WoTILh0zXQ5DtbU= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 h1:4nm2G6A4pV9rdlWzGMPv4BNtQp22v1hg3yrtkYpeLl8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod 
h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 h1:BRXS0U76Z8wfF+bnkilA2QwpIch6URlm++yPUt9QPmQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3/go.mod h1:bNXKFFyaiVvWuR6O16h/I1724+aXe/tAkA9/QS01t5k= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= +github.com/aws/aws-sdk-go-v2/service/ssooidc 
v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= +github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -140,9 +181,13 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.3.0 
h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= +github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -155,6 +200,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs 
v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= @@ -214,9 +261,13 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -245,6 +296,8 @@ github.com/containernetworking/plugins v1.6.0/go.mod h1:rYLQWMJz/dYuW1XhHdc9xuzd github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= 
+github.com/containers/storage v1.58.0 h1:Q7SyyCCjqgT3wYNgRNIL8o/wUS92heIj2/cc8Sewvcc= +github.com/containers/storage v1.58.0/go.mod h1:w7Jl6oG+OpeLGLzlLyOZPkmUso40kjpzgrHUk5tyBlo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= @@ -283,20 +336,36 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.1.1+incompatible 
h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I= +github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84= github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -305,13 +374,23 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/firecracker-microvm/firecracker-go-sdk v1.0.0 h1:HTnxnX9pvQkQOHjv+TppzUyi2BNFL/7aegSlqIK/usY= github.com/firecracker-microvm/firecracker-go-sdk v1.0.0/go.mod h1:iXd7gqdwzvhB4VbNVMb70g/IY04fOuQbbBGM+PQEkgo= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= @@ -319,19 +398,20 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gaissmai/extnetip v0.3.3 h1:0nXgaD0/pylkVxCpxEAk43aOFq8ZqlUgB5KCejju7aE= +github.com/gaissmai/extnetip v0.3.3/go.mod h1:M3NWlyFKaVosQXWXKKeIPK+5VM4U85DahdIqNYX4TK4= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini 
v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -434,11 +514,14 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock 
v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -458,10 +541,11 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -469,23 +553,33 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.5 h1:4RnlYcDs5hoA++CeFjlbZ/U9Yp1EuWr+UhhTyYQjOP0= +github.com/google/go-containerregistry v0.20.5/go.mod h1:Q14vdOOzug02bwnhMkZKD4e30pDaD9W65qzXpyzF49E= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/nftables v0.3.0 h1:bkyZ0cbpVeMHXOrtlFc8ISmfVqq5gPJukoYieyVmITg= +github.com/google/nftables v0.3.0/go.mod h1:BCp9FsrbF1Fn/Yu6CLUc9GGZFw/+hsxfluNXXmxBfRM= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -493,8 +587,12 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 
v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -502,20 +600,19 @@ github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1 h1:KcFzXwzM/kGhIRHvc8jdixfIJjVzuUJdnv+5xsPutog= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.1/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= 
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= -github.com/hashicorp/consul/api v1.31.2 h1:NicObVJHcCmyOIl7Z9iHPvvFrocgTYo9cITSGg0/7pw= -github.com/hashicorp/consul/api v1.31.2/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= +github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -531,8 +628,8 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -549,8 +646,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= @@ -571,16 +668,14 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod 
h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= -github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= -github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= +github.com/jellydator/ttlcache/v3 v3.3.1-0.20250207140243-aefc35918359 h1:uzTOUCYbGERlXB3wX2/u9AsMeXnZCd8yLl2DMAY1Wxs= +github.com/jellydator/ttlcache/v3 v3.3.1-0.20250207140243-aefc35918359/go.mod h1:aqa3CYl8S7MwpMXtFH3uNIEEfOjcn1MUNO+bQIGbFAQ= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= -github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -589,6 +684,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -598,6 +695,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -612,6 +713,26 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
+github.com/launchdarkly/ccache v1.1.0 h1:voD1M+ZJXR3MREOKtBwgTF9hYHl1jg+vFKS/+VAkR2k= +github.com/launchdarkly/ccache v1.1.0/go.mod h1:TlxzrlnzvYeXiLHmesMuvoZetu4Z97cV1SsdqqBJi1Q= +github.com/launchdarkly/eventsource v1.8.0 h1:o9TL53lINP9PCrKESlpIZADvN+eHWlSVmAzZDZ+FEA0= +github.com/launchdarkly/eventsource v1.8.0/go.mod h1:IBckHy1VOjJGqSg07EJJLiUnk5DPunX9LKD9vbcgeHo= +github.com/launchdarkly/go-jsonstream/v3 v3.1.0 h1:U/7/LplZO72XefBQ+FzHf6o4FwLHVqBE+4V58Ornu/E= +github.com/launchdarkly/go-jsonstream/v3 v3.1.0/go.mod h1:2Pt4BR5AwWgsuVTCcIpB6Os04JFIKWfoA+7faKkZB5E= +github.com/launchdarkly/go-sdk-common/v3 v3.1.0 h1:KNCP5rfkOt/25oxGLAVgaU1BgrZnzH9Y/3Z6I8bMwDg= +github.com/launchdarkly/go-sdk-common/v3 v3.1.0/go.mod h1:mXFmDGEh4ydK3QilRhrAyKuf9v44VZQWnINyhqbbOd0= +github.com/launchdarkly/go-sdk-events/v3 v3.5.0 h1:Yav8Thm70dZbO8U1foYwZPf3w60n/lNBRaYeeNM/qg4= +github.com/launchdarkly/go-sdk-events/v3 v3.5.0/go.mod h1:oepYWQ2RvvjfL2WxkE1uJJIuRsIMOP4WIVgUpXRPcNI= +github.com/launchdarkly/go-semver v1.0.3 h1:agIy/RN3SqeQDIfKkl+oFslEdeIs7pgsJBs3CdCcGQM= +github.com/launchdarkly/go-semver v1.0.3/go.mod h1:xFmMwXba5Mb+3h72Z+VeSs9ahCvKo2QFUTHRNHVqR28= +github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1 h1:rTgcYAFraGFj7sBMB2b7JCYCm0b9kph4FaMX02t4osQ= +github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1/go.mod h1:fPS5d+zOsgFnMunj+Ki6jjlZtFvo4h9iNbtNXxzYn58= +github.com/launchdarkly/go-server-sdk/v7 v7.10.0 h1:LK6+nEAf3884WqH0rZvrEXDJFkNPMAYt+wylCoSsaRM= +github.com/launchdarkly/go-server-sdk/v7 v7.10.0/go.mod h1:G2aEvHogBRuak5Xsqj22YKjz0bGd2rlkrQ1917NVo+s= +github.com/launchdarkly/go-test-helpers/v2 v2.2.0 h1:L3kGILP/6ewikhzhdNkHy1b5y4zs50LueWenVF0sBbs= +github.com/launchdarkly/go-test-helpers/v2 v2.2.0/go.mod h1:L7+th5govYp5oKU9iN7To5PgznBuIjBPn+ejqKR0avw= +github.com/launchdarkly/go-test-helpers/v3 v3.0.2 h1:rh0085g1rVJM5qIukdaQ8z1XTWZztbJ49vRZuveqiuU= +github.com/launchdarkly/go-test-helpers/v3 v3.0.2/go.mod h1:u2ZvJlc/DDJTFrshWW50tWMZHLVYXofuSHUfTU/eIwM= 
github.com/loopholelabs/userfaultfd-go v0.1.2 h1:HwXFNoQ+/eWNgYIcIyrqn54gDVVJk+TmszYxMGnJVu4= github.com/loopholelabs/userfaultfd-go v0.1.2/go.mod h1:6+5c50Ji7MUXuWUSrPUhAttECwmEeLAmR33FlP7Fn4o= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -629,8 +750,9 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -648,16 +770,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 
h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/socket v0.2.0/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E= github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= github.com/mdlayher/vsock v1.1.1/go.mod h1:Y43jzcy7KM3QB+/FK15pfqGxDMCMzUXWegEfIbSM18U= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ= -github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= +github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY= +github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -668,24 +790,43 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure 
v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/capability v0.4.0 h1:4D4mI6KlNtWMCM1Z/K0i7RV1FkX+DBDHKVJpCndZoHk= +github.com/moby/sys/capability v0.4.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod 
h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ngrok/firewall_toolkit v0.0.18 h1:/+Rx/5qXXO8FpOoKpPnyR2nw8Y3KumuulSNZa3XGZE8= +github.com/ngrok/firewall_toolkit v0.0.18/go.mod h1:g1yp6uBx0r6A6+lICpZk4PEUCOlBF7D8m+b2q618Wj4= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -700,10 +841,9 @@ github.com/onsi/ginkgo v1.10.3/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= +github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -711,15 +851,18 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU= +github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest 
v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -730,10 +873,11 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod 
h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= @@ -741,6 +885,8 @@ github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= @@ -750,6 +896,8 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf 
v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -795,9 +943,9 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= @@ -822,6 +970,8 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -836,12 +986,15 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -854,6 +1007,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -865,16 +1020,20 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod 
h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= -github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -884,6 +1043,8 @@ github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zd github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= @@ -895,9 +1056,12 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -912,39 +1076,58 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/otelzap v0.9.0 h1:f+xpAfhQTjR8beiSMe1bnT/25PkeyWmOcI+SjXWguNw= +go.opentelemetry.io/contrib/bridges/otelzap v0.9.0/go.mod h1:T1Z1jyS5FttgQoF6UcGhnM+gF9wU32B4lHO69nXw4FE= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 h1:5dTKu4I5Dn4P2hxyW3l3jTaZx9ACgg0ECos1eAVrheY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 
h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/log v0.10.0 h1:1CXmspaRITvFcjA4kyVszuG4HjA61fPDxMb7q3BuyF0= +go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw= +go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= 
+go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= +go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -963,8 +1146,8 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.31.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.31.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -975,8 +1158,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -997,8 +1180,9 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1033,21 +1217,25 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1059,8 +1247,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1135,11 +1323,12 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1150,8 +1339,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1164,13 +1353,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1183,6 +1374,7 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1196,7 +1388,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1215,8 +1406,9 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= 
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1232,6 +1424,8 @@ google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1258,13 +1452,14 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1277,11 +1472,12 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod 
h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= -google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1294,8 +1490,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= -google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1323,7 +1519,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1331,9 +1526,12 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1365,6 +1563,8 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/packages/orchestrator/info.proto b/packages/orchestrator/info.proto new file mode 100644 index 0000000..22e9d4d --- /dev/null +++ b/packages/orchestrator/info.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "https://github.com/e2b-dev/infra/orchestrator"; + +// needs to be different from the enumeration in the template manager +enum ServiceInfoStatus { + OrchestratorHealthy = 0; + OrchestratorDraining = 1; + OrchestratorUnhealthy = 2; +} + +enum ServiceInfoRole { + TemplateBuilder = 0; + Orchestrator = 1; +} + +message ServiceInfoResponse { + string node_id = 1; + string service_id = 2; + string service_version = 3; + string service_commit = 4; + + ServiceInfoStatus service_status = 51; + repeated ServiceInfoRole service_roles = 52; + google.protobuf.Timestamp service_startup = 53; + + int64 metric_vcpu_used = 101; + int64 metric_memory_used_mb = 102; + int64 metric_disk_mb = 103; + int64 metric_sandboxes_running = 104; +} + +message ServiceStatusChangeRequest { + ServiceInfoStatus 
service_status = 2; +} + +service InfoService { + rpc ServiceInfo(google.protobuf.Empty) returns (ServiceInfoResponse); + rpc ServiceStatusOverride(ServiceStatusChangeRequest) returns (google.protobuf.Empty); +} diff --git a/packages/orchestrator/internal/config/config.go b/packages/orchestrator/internal/config/config.go new file mode 100644 index 0000000..4722446 --- /dev/null +++ b/packages/orchestrator/internal/config/config.go @@ -0,0 +1,5 @@ +package config + +import "github.com/e2b-dev/infra/packages/shared/pkg/env" + +var AllowSandboxInternet = env.GetEnv("ALLOW_SANDBOX_INTERNET", "true") != "false" diff --git a/packages/orchestrator/internal/consul/client.go b/packages/orchestrator/internal/consul/client.go deleted file mode 100644 index 07fd22b..0000000 --- a/packages/orchestrator/internal/consul/client.go +++ /dev/null @@ -1,26 +0,0 @@ -package consul - -import ( - "fmt" - - "github.com/hashicorp/consul/api" - - "github.com/e2b-dev/infra/packages/shared/pkg/utils" -) - -var ( - consulToken = utils.RequiredEnv("CONSUL_TOKEN", "Consul token for authenticating requests to the Consul API") - Client = utils.Must(newClient()) -) - -func newClient() (*api.Client, error) { - config := api.DefaultConfig() - config.Token = consulToken - - consulClient, err := api.NewClient(config) - if err != nil { - return nil, fmt.Errorf("failed to initialize Consul client: %w", err) - } - - return consulClient, nil -} diff --git a/packages/orchestrator/internal/consul/node.go b/packages/orchestrator/internal/consul/node.go deleted file mode 100644 index 3b6323c..0000000 --- a/packages/orchestrator/internal/consul/node.go +++ /dev/null @@ -1,13 +0,0 @@ -package consul - -import ( - "github.com/e2b-dev/infra/packages/shared/pkg/utils" -) - -const shortNodeIDLength = 8 - -var ( - nodeID = utils.RequiredEnv("NODE_ID", "Nomad ID of the instance node") - // Node ID must be at least 8 characters long. 
- ClientID = nodeID[:shortNodeIDLength] -) diff --git a/packages/orchestrator/internal/dns/server.go b/packages/orchestrator/internal/dns/server.go deleted file mode 100644 index 8fb64f8..0000000 --- a/packages/orchestrator/internal/dns/server.go +++ /dev/null @@ -1,103 +0,0 @@ -package dns - -import ( - "context" - "fmt" - "log" - "net" - "strings" - "sync" - - resolver "github.com/miekg/dns" - - "github.com/e2b-dev/infra/packages/shared/pkg/smap" -) - -const ttl = 0 - -type DNS struct { - records *smap.Map[string] - - closer struct { - once sync.Once - op func(context.Context) error - err error - } -} - -func New() *DNS { - return &DNS{ - records: smap.New[string](), - } -} - -func (d *DNS) Add(sandboxID, ip string) { - d.records.Insert(d.hostname(sandboxID), ip) -} - -func (d *DNS) Remove(sandboxID, ip string) { - d.records.RemoveCb(d.hostname(sandboxID), func(key string, v string, exists bool) bool { - return v == ip - }) -} - -func (d *DNS) get(hostname string) (string, bool) { - return d.records.Get(hostname) -} - -func (*DNS) hostname(sandboxID string) string { - return fmt.Sprintf("%s.", sandboxID) -} - -func (d *DNS) handleDNSRequest(w resolver.ResponseWriter, r *resolver.Msg) { - m := new(resolver.Msg) - m.SetReply(r) - m.Compress = false - m.Authoritative = true - - for _, q := range m.Question { - if q.Qtype == resolver.TypeA { - sandboxID := strings.Split(q.Name, "-")[0] - ip, found := d.get(sandboxID) - if found { - a := &resolver.A{ - Hdr: resolver.RR_Header{ - Name: q.Name, - Rrtype: resolver.TypeA, - Class: resolver.ClassINET, - Ttl: ttl, - }, - A: net.ParseIP(ip).To4(), - } - - m.Answer = append(m.Answer, a) - } - } - } - - err := w.WriteMsg(m) - if err != nil { - log.Printf("Failed to write message: %s\n", err.Error()) - } -} - -func (d *DNS) Start(address string, port int) error { - mux := resolver.NewServeMux() - - mux.HandleFunc(".", d.handleDNSRequest) - - server := resolver.Server{Addr: fmt.Sprintf("%s:%d", address, port), Net: "udp", 
Handler: mux} - - if err := server.ListenAndServe(); err != nil { - return fmt.Errorf("DNS server encounterted error: %w", err) - } - - d.closer.op = server.ShutdownContext - - return nil -} - -func (d *DNS) Close(ctx context.Context) error { - d.closer.once.Do(func() { d.closer.err = d.closer.op(ctx) }) - return d.closer.err -} diff --git a/packages/orchestrator/internal/grpcserver/server.go b/packages/orchestrator/internal/grpcserver/server.go new file mode 100644 index 0000000..66ea731 --- /dev/null +++ b/packages/orchestrator/internal/grpcserver/server.go @@ -0,0 +1,165 @@ +package grpcserver + +import ( + "context" + "fmt" + "net" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/selector" + "github.com/soheilhy/cmux" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/keepalive" + + e2bhealthcheck "github.com/e2b-dev/infra/packages/orchestrator/internal/healthcheck" + "github.com/e2b-dev/infra/packages/orchestrator/internal/service" + e2bgrpc "github.com/e2b-dev/infra/packages/shared/pkg/grpc" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type GRPCServer struct { + info *service.ServiceInfo + + grpc *grpc.Server + grpcHealth *health.Server + + shutdown struct { + once sync.Once + op func(context.Context) error + err error + } +} + +func New(tracerProvider trace.TracerProvider, meterProvider metric.MeterProvider, info *service.ServiceInfo) *GRPCServer { + opts := []logging.Option{ + logging.WithLogOnEvents(logging.StartCall, logging.PayloadReceived, logging.PayloadSent, logging.FinishCall), + 
logging.WithLevels(logging.DefaultServerCodeToLevel), + logging.WithFieldsFromContext(logging.ExtractFields), + } + + ignoredLoggingRoutes := logger.WithoutRoutes( + logger.HealthCheckRoute, + "/TemplateService/TemplateBuildStatus", + "/TemplateService/HealthStatus", + "/InfoService/ServiceInfo", + ) + srv := grpc.NewServer( + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: 5 * time.Second, // Minimum time between pings from client + PermitWithoutStream: true, // Allow pings even when no active streams + }), + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: 15 * time.Second, // Server sends keepalive pings every 15s + Timeout: 5 * time.Second, // Wait 5s for response before considering dead + }), + grpc.StatsHandler(e2bgrpc.NewStatsWrapper(otelgrpc.NewServerHandler( + otelgrpc.WithTracerProvider(tracerProvider), + otelgrpc.WithMeterProvider(meterProvider), + ))), + grpc.ChainUnaryInterceptor( + recovery.UnaryServerInterceptor(), + selector.UnaryServerInterceptor( + logging.UnaryServerInterceptor(logger.GRPCLogger(zap.L()), opts...), + ignoredLoggingRoutes, + ), + ), + grpc.ChainStreamInterceptor( + selector.StreamServerInterceptor( + logging.StreamServerInterceptor(logger.GRPCLogger(zap.L()), opts...), + ignoredLoggingRoutes, + ), + ), + ) + + grpcHealth := health.NewServer() + grpc_health_v1.RegisterHealthServer(srv, grpcHealth) + + return &GRPCServer{ + info: info, + grpc: srv, + grpcHealth: grpcHealth, + } +} + +func (g *GRPCServer) HealthServer() *health.Server { + return g.grpcHealth +} + +func (g *GRPCServer) GRPCServer() *grpc.Server { + return g.grpc +} + +// Start launches +func (g *GRPCServer) Start(ctx context.Context, port uint) error { + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) + if err != nil { + return fmt.Errorf("failed to listen on port %d: %w", port, err) + } + + healthcheck, err := e2bhealthcheck.NewHealthcheck(g.info) + if err != nil { + return fmt.Errorf("failed to create healthcheck: %w", err) + 
} + + // Reuse the same TCP port between grpc and HTTP requests + m := cmux.New(lis) + // Match HTTP requests. + httpL := m.Match(cmux.HTTP1Fast()) + // Match gRPC requests. + grpcL := m.Match(cmux.Any()) + + zap.L().Info("Starting GRPC server", zap.Uint("port", port)) + + go func() { + if err := g.grpc.Serve(grpcL); err != nil { + zap.L().Fatal("grpc server failed to serve", zap.Error(err)) + } + }() + + // Start health check + go healthcheck.Start(ctx, httpL) + + g.shutdown.op = func(ctx context.Context) error { + // mark services as unhealthy so now new request will be accepted + // gRPC's Stop and GracefulStop will close the listener, so this will also close the listener for the health check + // we should probably wrap the listener with noop close, so we can close the listener ourselves + select { + case <-ctx.Done(): + zap.L().Info("Stopping grpc server") + g.grpc.Stop() + default: + zap.L().Info("Stopping grpc server gracefully") + g.grpc.GracefulStop() + } + m.Close() + + return nil + } + + // Start serving traffic, blocking call + return m.Serve() +} + +func (g *GRPCServer) Close(ctx context.Context) error { + g.shutdown.once.Do(func() { + if g.shutdown.op == nil { + // should only be true if there was an error + // during startup. 
+ return + } + + g.shutdown.err = g.shutdown.op(ctx) + g.shutdown.op = nil + }) + + return g.shutdown.err +} diff --git a/packages/orchestrator/internal/healthcheck/healthcheck.go b/packages/orchestrator/internal/healthcheck/healthcheck.go new file mode 100644 index 0000000..a0d8597 --- /dev/null +++ b/packages/orchestrator/internal/healthcheck/healthcheck.go @@ -0,0 +1,79 @@ +package healthcheck + +import ( + "context" + "encoding/json" + "log" + "net" + "net/http" + "sync" + "time" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/service" + e2borchestratorinfo "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator-info" + e2bHealth "github.com/e2b-dev/infra/packages/shared/pkg/health" +) + +type Healthcheck struct { + info *service.ServiceInfo + + lastRun time.Time + mu sync.RWMutex +} + +func NewHealthcheck(info *service.ServiceInfo) (*Healthcheck, error) { + return &Healthcheck{ + info: info, + + lastRun: time.Now(), + mu: sync.RWMutex{}, + }, nil +} + +func (h *Healthcheck) Start(_ context.Context, listener net.Listener) { + // Start /health HTTP server + routeMux := http.NewServeMux() + routeMux.HandleFunc("/health", h.healthHandler) + httpServer := &http.Server{ + Handler: routeMux, + } + + go func() { + zap.L().Info("Starting health server") + if err := httpServer.Serve(listener); err != nil { + log.Fatal(err) + } + }() +} + +func (h *Healthcheck) getStatus() e2bHealth.Status { + switch h.info.GetStatus() { + case e2borchestratorinfo.ServiceInfoStatus_OrchestratorHealthy: + return e2bHealth.Healthy + case e2borchestratorinfo.ServiceInfoStatus_OrchestratorDraining: + return e2bHealth.Draining + } + + return e2bHealth.Unhealthy +} + +func (h *Healthcheck) healthHandler(w http.ResponseWriter, r *http.Request) { + h.mu.RLock() + defer h.mu.RUnlock() + + status := h.getStatus() + response := e2bHealth.Response{Status: status, Version: h.info.SourceCommit} + + w.Header().Set("Content-Type", "application/json") + if 
status == e2bHealth.Unhealthy { + w.WriteHeader(http.StatusServiceUnavailable) + } else { + w.WriteHeader(http.StatusOK) + } + + if err := json.NewEncoder(w).Encode(response); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/packages/orchestrator/internal/metrics/sandboxes.go b/packages/orchestrator/internal/metrics/sandboxes.go new file mode 100644 index 0000000..149d378 --- /dev/null +++ b/packages/orchestrator/internal/metrics/sandboxes.go @@ -0,0 +1,208 @@ +package metrics + +import ( + "context" + "errors" + "fmt" + "math" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +const ( + sbxMemThresholdPct = 80 + sbxCpuThresholdPct = 80 + minEnvdVersionForMetrics = "0.1.5" + timeoutGetMetrics = 100 * time.Millisecond + metricsParallelismFactor = 5 // Used to calculate number of concurrently sandbox metrics requests + + shiftFromMiBToBytes = 20 // Shift to convert MiB to bytes +) + +type ( + GetSandboxMetricsFunc func(ctx context.Context) (*sandbox.Metrics, error) +) + +type SandboxObserver struct { + meterExporter sdkmetric.Exporter + registration metric.Registration + exportInterval time.Duration + + sandboxes *smap.Map[*sandbox.Sandbox] + + meter metric.Meter + cpuTotal metric.Int64ObservableGauge + cpuUsed metric.Float64ObservableGauge + memoryTotal metric.Int64ObservableGauge + memoryUsed 
metric.Int64ObservableGauge +} + +func NewSandboxObserver(ctx context.Context, commitSHA, clientID string, sandboxMetricsExportPeriod time.Duration, sandboxes *smap.Map[*sandbox.Sandbox]) (*SandboxObserver, error) { + deltaTemporality := otlpmetricgrpc.WithTemporalitySelector(func(kind sdkmetric.InstrumentKind) metricdata.Temporality { + // Use delta temporality for gauges and cumulative for all other instrument kinds. + // This is used to prevent reporting sandbox metrics indefinitely. + if kind == sdkmetric.InstrumentKindGauge { + return metricdata.DeltaTemporality + } + return metricdata.CumulativeTemporality + }) + + externalMeterExporter, err := telemetry.NewMeterExporter(ctx, deltaTemporality) + if err != nil { + return nil, fmt.Errorf("failed to create external meter exporter: %w", err) + } + + meterProvider, err := telemetry.NewMeterProvider(ctx, externalMeterExporter, sandboxMetricsExportPeriod, "external-metrics", commitSHA, clientID, sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter)) + if err != nil { + return nil, fmt.Errorf("failed to create external metric provider: %w", err) + } + + meter := meterProvider.Meter("orchestrator.sandbox.metrics") + cpuTotal, err := telemetry.GetGaugeInt(meter, telemetry.SandboxCpuTotalGaugeName) + if err != nil { + return nil, fmt.Errorf("failed to create CPU total gauge: %w", err) + } + + cpuUsed, err := telemetry.GetGaugeFloat(meter, telemetry.SandboxCpuUsedGaugeName) + if err != nil { + return nil, fmt.Errorf("failed to create CPU used gauge: %w", err) + } + + memoryTotal, err := telemetry.GetGaugeInt(meter, telemetry.SandboxRamTotalGaugeName) + if err != nil { + return nil, fmt.Errorf("failed to create memory total gauge: %w", err) + } + + memoryUsed, err := telemetry.GetGaugeInt(meter, telemetry.SandboxRamUsedGaugeName) + if err != nil { + return nil, fmt.Errorf("failed to create memory used gauge: %w", err) + } + + so := &SandboxObserver{ + exportInterval: sandboxMetricsExportPeriod, + meterExporter: 
externalMeterExporter, + sandboxes: sandboxes, + meter: meter, + cpuTotal: cpuTotal, + cpuUsed: cpuUsed, + memoryTotal: memoryTotal, + memoryUsed: memoryUsed, + } + + registration, err := so.startObserving() + if err != nil { + return nil, fmt.Errorf("failed to start observing sandbox metrics: %w", err) + } + + // Register the callback to start observing sandbox metrics + so.registration = registration + + return so, nil +} + +func (so *SandboxObserver) startObserving() (metric.Registration, error) { + unregister, err := so.meter.RegisterCallback( + func(ctx context.Context, o metric.Observer) error { + sbxCount := so.sandboxes.Count() + + wg := errgroup.Group{} + // Run concurrently to prevent blocking if there are many sandboxes other callbacks + limit := math.Ceil(float64(sbxCount) / metricsParallelismFactor) + wg.SetLimit(int(limit)) + + for _, sbx := range so.sandboxes.Items() { + if !utils.IsGTEVersion(sbx.Config.EnvdVersion, minEnvdVersionForMetrics) { + continue + } + + if !sbx.Checks.UseClickhouseMetrics { + continue + } + + wg.Go(func() error { + // Make sure the sandbox doesn't change while we are getting metrics (the slot could be assigned to another sandbox) + sbxMetrics, err := sbx.Checks.GetMetrics(timeoutGetMetrics) + if err != nil { + // Sandbox has stopped + if errors.Is(err, sandbox.ErrChecksStopped) { + return nil + } + + return err + } + + attributes := metric.WithAttributes(attribute.String("sandbox_id", sbx.Config.SandboxId), attribute.String("team_id", sbx.Config.TeamId)) + o.ObserveInt64(so.cpuTotal, sbxMetrics.CPUCount, attributes) + o.ObserveFloat64(so.cpuUsed, sbxMetrics.CPUUsedPercent, attributes) + // Save as bytes for the future, so we can return more accurate values + o.ObserveInt64(so.memoryTotal, sbxMetrics.MemTotalMiB<= sbxMemThresholdPct { + sbxlogger.E(sbx).Warn("Memory usage threshold exceeded", + zap.Float32("mem_used_percent", memUsedPct), + zap.Float32("mem_threshold_percent", sbxMemThresholdPct), + ) + } + + if 
sbxMetrics.CPUUsedPercent >= sbxCpuThresholdPct { + sbxlogger.E(sbx).Warn("CPU usage threshold exceeded", + zap.Float32("cpu_used_percent", float32(sbxMetrics.CPUUsedPercent)), + zap.Float32("cpu_threshold_percent", sbxCpuThresholdPct), + ) + } + return nil + }) + } + + err := wg.Wait() + if err != nil { + // Log the error but observe other sandboxes + zap.L().Warn("error during observing sandbox metrics", zap.Error(err)) + } + + return nil + }, so.cpuTotal, so.cpuUsed, so.memoryTotal, so.memoryUsed) + if err != nil { + return nil, err + } + + return unregister, nil +} + +func (so *SandboxObserver) Close(ctx context.Context) error { + if so.meterExporter == nil { + return nil + } + + var errs []error + + if so.registration != nil { + if err := so.registration.Unregister(); err != nil { + errs = append(errs, fmt.Errorf("failed to unregister sandbox observer callback: %w", err)) + } + } + + if err := so.meterExporter.Shutdown(ctx); err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown sandbox observer meter provider: %w", err)) + } + + return errors.Join(errs...) 
+} diff --git a/packages/orchestrator/internal/proxy/proxy.go b/packages/orchestrator/internal/proxy/proxy.go new file mode 100644 index 0000000..1f9b669 --- /dev/null +++ b/packages/orchestrator/internal/proxy/proxy.go @@ -0,0 +1,129 @@ +package proxy + +import ( + "context" + "fmt" + "net/http" + "net/url" + "time" + + "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + reverse_proxy "github.com/e2b-dev/infra/packages/shared/pkg/proxy" + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/pool" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +const ( + // This timeout should be > 600 (GCP LB upstream idle timeout) to prevent race condition + // Also it's a good practice to set it to higher values as you progress in the stack + // https://cloud.google.com/load-balancing/docs/https#timeouts_and_retries%23:~:text=The%20load%20balancer%27s%20backend%20keepalive,is%20greater%20than%20600%20seconds + idleTimeout = 620 * time.Second +) + +type SandboxProxy struct { + proxy *reverse_proxy.Proxy +} + +func NewSandboxProxy(meterProvider metric.MeterProvider, port uint, sandboxes *smap.Map[*sandbox.Sandbox]) (*SandboxProxy, error) { + proxy := reverse_proxy.New( + port, + idleTimeout, + func(r *http.Request) (*pool.Destination, error) { + sandboxId, port, err := reverse_proxy.ParseHost(r.Host) + if err != nil { + return nil, err + } + + sbx, found := sandboxes.Get(sandboxId) + if !found { + return nil, reverse_proxy.NewErrSandboxNotFound(sandboxId) + } + + url := &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", sbx.Slot.HostIPString(), port), + } + + return &pool.Destination{ + Url: url, + SandboxId: sbx.Config.SandboxId, + SandboxPort: port, + DefaultToPortError: true, + IncludeSandboxIdInProxyErrorLogger: true, + // We need to include id unique to sandbox to prevent 
reuse of connection to the same IP:port pair by different sandboxes reusing the network slot. + // We are not using sandbox id to prevent removing connections based on sandbox id (pause/resume race condition). + ConnectionKey: sbx.Config.ExecutionId, + RequestLogger: zap.L().With( + zap.String("host", r.Host), + logger.WithSandboxID(sbx.Config.SandboxId), + zap.String("sandbox_ip", sbx.Slot.HostIPString()), + logger.WithTeamID(sbx.Config.TeamId), + zap.String("sandbox_req_port", url.Port()), + zap.String("sandbox_req_path", r.URL.Path), + ), + }, nil + }, + ) + + meter := meterProvider.Meter("orchestrator.proxy.sandbox") + _, err := telemetry.GetObservableUpDownCounter(meter, telemetry.OrchestratorProxyServerConnectionsMeterCounterName, func(ctx context.Context, observer metric.Int64Observer) error { + observer.Observe(proxy.CurrentServerConnections()) + + return nil + }) + if err != nil { + return nil, fmt.Errorf("error registering orchestrator proxy connections metric (%s): %w", telemetry.OrchestratorProxyServerConnectionsMeterCounterName, err) + } + + _, err = telemetry.GetObservableUpDownCounter(meter, telemetry.OrchestratorProxyPoolConnectionsMeterCounterName, func(ctx context.Context, observer metric.Int64Observer) error { + observer.Observe(proxy.CurrentPoolConnections()) + + return nil + }) + if err != nil { + return nil, fmt.Errorf("error registering orchestrator proxy connections metric (%s): %w", telemetry.OrchestratorProxyPoolConnectionsMeterCounterName, err) + } + + _, err = telemetry.GetObservableUpDownCounter(meter, telemetry.OrchestratorProxyPoolSizeMeterCounterName, func(ctx context.Context, observer metric.Int64Observer) error { + observer.Observe(int64(proxy.CurrentPoolSize())) + + return nil + }) + if err != nil { + return nil, fmt.Errorf("error registering orchestrator proxy pool size metric (%s): %w", telemetry.OrchestratorProxyPoolSizeMeterCounterName, err) + } + + return &SandboxProxy{proxy}, nil +} + +func (p *SandboxProxy) Start() error { 
+ return p.proxy.ListenAndServe() +} + +func (p *SandboxProxy) Close(ctx context.Context) error { + var err error + select { + case <-ctx.Done(): + err = p.proxy.Close() + default: + err = p.proxy.Shutdown(ctx) + } + if err != nil { + return fmt.Errorf("failed to shutdown proxy server: %w", err) + } + + return nil +} + +func (p *SandboxProxy) RemoveFromPool(connectionKey string) { + p.proxy.RemoveFromPool(connectionKey) +} + +func (p *SandboxProxy) GetAddr() string { + return p.proxy.Addr +} diff --git a/packages/orchestrator/internal/sandbox/block/cache.go b/packages/orchestrator/internal/sandbox/block/cache.go index f2c60e7..9bee16c 100644 --- a/packages/orchestrator/internal/sandbox/block/cache.go +++ b/packages/orchestrator/internal/sandbox/block/cache.go @@ -8,9 +8,11 @@ import ( "sort" "sync" "sync/atomic" + "syscall" "github.com/bits-and-blooms/bitset" "github.com/edsrzf/mmap-go" + "go.uber.org/zap" "golang.org/x/sys/unix" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" @@ -74,7 +76,7 @@ func (m *Cache) isClosed() bool { return m.closed.Load() } -func (m *Cache) Export(out io.Writer) (*bitset.BitSet, error) { +func (m *Cache) ExportToDiff(out io.Writer) (*header.DiffMetadata, error) { m.mu.Lock() defer m.mu.Unlock() @@ -87,22 +89,37 @@ func (m *Cache) Export(out io.Writer) (*bitset.BitSet, error) { return nil, fmt.Errorf("error flushing mmap: %w", err) } - tracked := bitset.New(uint(header.TotalBlocks(m.size, m.blockSize))) + dirty := bitset.New(uint(header.TotalBlocks(m.size, m.blockSize))) + empty := bitset.New(0) for _, key := range m.dirtySortedKeys() { - block := header.BlockIdx(key, m.blockSize) + blockIdx := header.BlockIdx(key, m.blockSize) - tracked.Set(uint(block)) + block := (*m.mmap)[key : key+m.blockSize] + isEmpty, err := header.IsEmptyBlock(block, m.blockSize) + if err != nil { + return nil, fmt.Errorf("error checking empty block: %w", err) + } + if isEmpty { + empty.Set(uint(blockIdx)) + continue + } - _, err := 
out.Write((*m.mmap)[key : key+m.blockSize]) + dirty.Set(uint(blockIdx)) + _, err = out.Write(block) if err != nil { - fmt.Printf("error writing to out: %v\n", err) + zap.L().Error("error writing to out", zap.Error(err)) return nil, err } } - return tracked, nil + return &header.DiffMetadata{ + Dirty: dirty, + Empty: empty, + + BlockSize: m.blockSize, + }, nil } func (m *Cache) ReadAt(b []byte, off int64) (int, error) { @@ -132,7 +149,7 @@ func (m *Cache) WriteAt(b []byte, off int64) (int, error) { return m.WriteAtWithoutLock(b, off) } -func (m *Cache) Close() error { +func (m *Cache) Close() (e error) { m.mu.Lock() defer m.mu.Unlock() @@ -141,10 +158,15 @@ func (m *Cache) Close() error { return NewErrCacheClosed(m.filePath) } - return errors.Join( - m.mmap.Unmap(), - os.RemoveAll(m.filePath), - ) + err := m.mmap.Unmap() + if err != nil { + e = errors.Join(e, fmt.Errorf("error unmapping mmap: %w", err)) + } + + // TODO: Move to to the scope of the caller + e = errors.Join(e, os.RemoveAll(m.filePath)) + + return e } func (m *Cache) Size() (int64, error) { @@ -223,3 +245,25 @@ func (m *Cache) dirtySortedKeys() []int64 { return keys } + +func (m *Cache) MarkAllAsDirty() { + m.setIsCached(0, m.size) +} + +// FileSize returns the size of the cache on disk. +// The size might differ from the dirty size, as it may not be fully on disk. 
+func (m *Cache) FileSize() (int64, error) { + var stat syscall.Stat_t + err := syscall.Stat(m.filePath, &stat) + if err != nil { + return 0, fmt.Errorf("failed to get file stats: %w", err) + } + + var fsStat syscall.Statfs_t + err = syscall.Statfs(m.filePath, &fsStat) + if err != nil { + return 0, fmt.Errorf("failed to get disk stats for path %s: %w", m.filePath, err) + } + + return int64(stat.Blocks) * int64(fsStat.Bsize), nil +} diff --git a/packages/orchestrator/internal/sandbox/block/chunk.go b/packages/orchestrator/internal/sandbox/block/chunk.go index aa2b5d2..187ace7 100644 --- a/packages/orchestrator/internal/sandbox/block/chunk.go +++ b/packages/orchestrator/internal/sandbox/block/chunk.go @@ -6,6 +6,7 @@ import ( "fmt" "io" + "go.uber.org/zap" "golang.org/x/sync/errgroup" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" @@ -118,8 +119,7 @@ func (c *Chunker) fetchToCache(off, length int64) error { eg.Go(func() (err error) { defer func() { if r := recover(); r != nil { - fmt.Println("Recovered from panic in the fetch handler:", r) - + zap.L().Error("recovered from panic in the fetch handler", zap.Any("error", r)) err = fmt.Errorf("recovered from panic in the fetch handler: %v", r) } }() @@ -161,3 +161,7 @@ func (c *Chunker) fetchToCache(off, length int64) error { func (c *Chunker) Close() error { return c.cache.Close() } + +func (c *Chunker) FileSize() (int64, error) { + return c.cache.FileSize() +} diff --git a/packages/orchestrator/internal/sandbox/block/device.go b/packages/orchestrator/internal/sandbox/block/device.go index a3c6f21..13a3598 100644 --- a/packages/orchestrator/internal/sandbox/block/device.go +++ b/packages/orchestrator/internal/sandbox/block/device.go @@ -1,6 +1,10 @@ package block -import "io" +import ( + "io" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" +) type ErrBytesNotAvailable struct{} @@ -10,12 +14,14 @@ func (ErrBytesNotAvailable) Error() string { type ReadonlyDevice interface { io.ReaderAt + 
io.Closer Slice(off, length int64) ([]byte, error) Size() (int64, error) + BlockSize() int64 + Header() *header.Header } type Device interface { ReadonlyDevice io.WriterAt - Close() error } diff --git a/packages/orchestrator/internal/sandbox/block/local.go b/packages/orchestrator/internal/sandbox/block/local.go index 78cedd3..5b63df8 100644 --- a/packages/orchestrator/internal/sandbox/block/local.go +++ b/packages/orchestrator/internal/sandbox/block/local.go @@ -5,18 +5,20 @@ import ( "fmt" "os" - "github.com/edsrzf/mmap-go" - "golang.org/x/sys/unix" + "github.com/google/uuid" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" ) type Local struct { - m mmap.MMap - size int64 + f *os.File path string + + header *header.Header } -func NewLocal(path string) (*Local, error) { - f, err := os.OpenFile(path, os.O_RDONLY, 0o777) +func NewLocal(path string, blockSize int64, buildID uuid.UUID) (*Local, error) { + f, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm) if err != nil { return nil, fmt.Errorf("failed to open file: %w", err) } @@ -26,17 +28,16 @@ func NewLocal(path string) (*Local, error) { return nil, fmt.Errorf("failed to get file info: %w", err) } - defer f.Close() - - m, err := mmap.Map(f, unix.PROT_READ, mmap.RDONLY) - if err != nil { - return nil, fmt.Errorf("failed to map region: %w", err) - } + h := header.NewHeader(header.NewTemplateMetadata( + buildID, + uint64(blockSize), + uint64(info.Size()), + ), nil) return &Local{ - m: m, - size: info.Size(), - path: path, + f: f, + path: path, + header: h, }, nil } @@ -50,21 +51,54 @@ func (d *Local) ReadAt(p []byte, off int64) (int, error) { } func (d *Local) Size() (int64, error) { - return d.size, nil + return int64(d.header.Metadata.Size), nil } -func (d *Local) Close() error { - return errors.Join( - d.m.Unmap(), - os.Remove(d.path), - ) +func (d *Local) BlockSize() int64 { + return int64(d.header.Metadata.BlockSize) +} + +func (d *Local) Close() (e error) { + defer func() { + e = 
errors.Join(e, os.Remove(d.path)) + }() + + err := d.f.Close() + if err != nil { + return fmt.Errorf("error closing file: %w", err) + } + + return nil } func (d *Local) Slice(off, length int64) ([]byte, error) { end := off + length - if end > d.size { - end = d.size + size := int64(d.header.Metadata.Size) + if end > size { + end = size + length = end - off } - return d.m[off:end], nil + out := make([]byte, length) + _, err := d.f.ReadAt(out, off) + if err != nil { + return nil, err + } + + return out, nil +} + +func (d *Local) Header() *header.Header { + return d.header +} + +func (d *Local) UpdateSize() error { + info, err := d.f.Stat() + if err != nil { + return fmt.Errorf("failed to get file info: %w", err) + } + + d.header.Metadata.Size = uint64(info.Size()) + + return nil } diff --git a/packages/orchestrator/internal/sandbox/block/overlay.go b/packages/orchestrator/internal/sandbox/block/overlay.go index 6deb4a4..680a19b 100644 --- a/packages/orchestrator/internal/sandbox/block/overlay.go +++ b/packages/orchestrator/internal/sandbox/block/overlay.go @@ -69,6 +69,10 @@ func (o *Overlay) Size() (int64, error) { return o.cache.Size() } +func (o *Overlay) BlockSize() int64 { + return o.blockSize +} + func (o *Overlay) Close() error { if o.cacheEjected.Load() { return nil @@ -76,3 +80,7 @@ func (o *Overlay) Close() error { return o.cache.Close() } + +func (o *Overlay) Header() *header.Header { + return o.device.Header() +} diff --git a/packages/orchestrator/internal/sandbox/build/build.go b/packages/orchestrator/internal/sandbox/build/build.go index bb46cff..3a0cd7e 100644 --- a/packages/orchestrator/internal/sandbox/build/build.go +++ b/packages/orchestrator/internal/sandbox/build/build.go @@ -5,25 +5,30 @@ import ( "io" "github.com/google/uuid" + "go.uber.org/zap" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" ) type File struct { - header *header.Header - store *DiffStore - fileType DiffType 
+ header *header.Header + store *DiffStore + fileType DiffType + persistence storage.StorageProvider } func NewFile( header *header.Header, store *DiffStore, fileType DiffType, + persistence storage.StorageProvider, ) *File { return &File{ - header: header, - store: store, - fileType: fileType, + header: header, + store: store, + fileType: fileType, + persistence: persistence, } } @@ -47,8 +52,8 @@ func (b *File) ReadAt(p []byte, off int64) (n int, err error) { readLength := min(mappedLength, remainingReadLength) if readLength <= 0 { - fmt.Printf( - "(%d bytes left to read, off %d) reading %d bytes from %+v/%+v: [%d:] -> [%d:%d] <> %d (mapped length: %d, remaining read length: %d)\n", + zap.L().Error(fmt.Sprintf( + "(%d bytes left to read, off %d) reading %d bytes from %+v/%+v: [%d:] -> [%d:%d] <> %d (mapped length: %d, remaining read length: %d)\n>>> EOF\n", len(p)-n, off, readLength, @@ -60,9 +65,7 @@ func (b *File) ReadAt(p []byte, off int64) (n int, err error) { n, mappedLength, remainingReadLength, - ) - - fmt.Printf(">>> EOF\n") + )) return n, io.EOF } @@ -102,6 +105,7 @@ func (b *File) Slice(off, length int64) ([]byte, error) { return nil, fmt.Errorf("failed to get mapping: %w", err) } + // Pass empty huge page when the build id is nil. 
if *buildID == uuid.Nil { return header.EmptyHugePage, nil } @@ -115,11 +119,15 @@ func (b *File) Slice(off, length int64) ([]byte, error) { } func (b *File) getBuild(buildID *uuid.UUID) (Diff, error) { - source, err := b.store.Get( + storageDiff := newStorageDiff( + b.store.cachePath, buildID.String(), b.fileType, int64(b.header.Metadata.BlockSize), + b.persistence, ) + + source, err := b.store.Get(storageDiff) if err != nil { return nil, fmt.Errorf("failed to get build from store: %w", err) } diff --git a/packages/orchestrator/internal/sandbox/build/cache.go b/packages/orchestrator/internal/sandbox/build/cache.go index 77a7786..2471897 100644 --- a/packages/orchestrator/internal/sandbox/build/cache.go +++ b/packages/orchestrator/internal/sandbox/build/cache.go @@ -4,58 +4,92 @@ import ( "context" "fmt" "os" + "sync" "time" "github.com/jellydator/ttlcache/v3" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" +const ( + ToMBShift = 20 + fallbackDiffSize = 100 << ToMBShift ) -const buildExpiration = time.Hour * 25 +const DefaultCachePath = "/orchestrator/build" -const cachePath = "/orchestrator/build" +type deleteDiff struct { + size int64 + cancel chan struct{} +} type DiffStore struct { - bucket *s3.BucketHandle - cache *ttlcache.Cache[string, Diff] - ctx context.Context + cachePath string + cache *ttlcache.Cache[DiffStoreKey, Diff] + ctx context.Context + close chan struct{} + + // pdSizes is used to keep track of the diff sizes + // that are scheduled for deletion, as this won't show up in the disk usage. 
+ pdSizes map[DiffStoreKey]*deleteDiff + pdMu sync.RWMutex + pdDelay time.Duration } -func NewDiffStore(bucket *s3.BucketHandle, ctx context.Context) (*DiffStore, error) { +func NewDiffStore(ctx context.Context, cachePath string, ttl, delay time.Duration, maxUsedPercentage float64) (*DiffStore, error) { + err := os.MkdirAll(cachePath, 0o755) + if err != nil { + return nil, fmt.Errorf("failed to create cache directory: %w", err) + } + cache := ttlcache.New( - ttlcache.WithTTL[string, Diff](buildExpiration), + ttlcache.WithTTL[DiffStoreKey, Diff](ttl), ) - cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[string, Diff]) { + ds := &DiffStore{ + cachePath: cachePath, + cache: cache, + ctx: ctx, + close: make(chan struct{}), + pdSizes: make(map[DiffStoreKey]*deleteDiff), + pdDelay: delay, + } + + cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[DiffStoreKey, Diff]) { buildData := item.Value() + // buildData will be deleted by calling buildData.Close() + defer ds.resetDelete(item.Key()) - err := buildData.Close() + err = buildData.Close() if err != nil { - fmt.Printf("[build data cache]: failed to cleanup build data for item %s: %v\n", item.Key(), err) + zap.L().Warn("failed to cleanup build data cache for item", zap.Any("item_key", item.Key()), zap.Error(err)) } }) - err := os.MkdirAll(cachePath, 0o755) - if err != nil { - return nil, fmt.Errorf("failed to create cache directory: %w", err) - } - go cache.Start() + go ds.startDiskSpaceEviction(maxUsedPercentage) - return &DiffStore{ - bucket: bucket, - cache: cache, - ctx: ctx, - }, nil + return ds, nil } -func (s *DiffStore) Get(buildId string, diffType DiffType, blockSize int64) (Diff, error) { - diff := newStorageDiff(buildId, diffType, blockSize) +type DiffStoreKey string + +func GetDiffStoreKey(buildID string, diffType DiffType) DiffStoreKey { + return DiffStoreKey(fmt.Sprintf("%s/%s", buildID, diffType)) +} +func (s 
*DiffStore) Close() { + close(s.close) + s.cache.Stop() +} + +func (s *DiffStore) Get(diff Diff) (Diff, error) { + s.resetDelete(diff.CacheKey()) source, found := s.cache.GetOrSet( diff.CacheKey(), diff, - ttlcache.WithTTL[string, Diff](buildExpiration), + ttlcache.WithTTL[DiffStoreKey, Diff](ttlcache.DefaultTTL), ) value := source.Value() @@ -64,7 +98,7 @@ func (s *DiffStore) Get(buildId string, diffType DiffType, blockSize int64) (Dif } if !found { - err := diff.Init(s.ctx, s.bucket) + err := diff.Init(s.ctx) if err != nil { return nil, fmt.Errorf("failed to init source: %w", err) } @@ -73,8 +107,164 @@ func (s *DiffStore) Get(buildId string, diffType DiffType, blockSize int64) (Dif return value, nil } -func (s *DiffStore) Add(buildId string, t DiffType, d Diff) { - storagePath := storagePath(buildId, t) +func (s *DiffStore) Add(d Diff) { + s.resetDelete(d.CacheKey()) + s.cache.Set(d.CacheKey(), d, ttlcache.DefaultTTL) +} + +func (s *DiffStore) Has(d Diff) bool { + return s.cache.Has(d.CacheKey()) +} + +func (s *DiffStore) startDiskSpaceEviction(threshold float64) { + getDelay := func(fast bool) time.Duration { + if fast { + return time.Microsecond + } else { + return time.Second + } + } + + timer := time.NewTimer(getDelay(false)) + defer timer.Stop() + + for { + select { + case <-s.ctx.Done(): + return + case <-s.close: + return + case <-timer.C: + dUsed, dTotal, err := diskUsage(s.cachePath) + if err != nil { + zap.L().Error("failed to get disk usage", zap.Error(err)) + timer.Reset(getDelay(false)) + continue + } + + pUsed := s.getPendingDeletesSize() + used := int64(dUsed) - pUsed + percentage := float64(used) / float64(dTotal) * 100 + + if percentage <= threshold { + timer.Reset(getDelay(false)) + continue + } + + succ, err := s.deleteOldestFromCache() + if err != nil { + zap.L().Error("failed to delete oldest item from cache", zap.Error(err)) + timer.Reset(getDelay(false)) + continue + } + + // Item evicted, reset timer to fast check + 
timer.Reset(getDelay(succ)) + } + } +} + +func (s *DiffStore) getPendingDeletesSize() int64 { + s.pdMu.RLock() + defer s.pdMu.RUnlock() + + var pendingSize int64 + for _, value := range s.pdSizes { + pendingSize += value.size + } + return pendingSize +} + +// deleteOldestFromCache deletes the oldest item (smallest TTL) from the cache. +// ttlcache has items in order by TTL +func (s *DiffStore) deleteOldestFromCache() (suc bool, e error) { + defer func() { + // Because of bug in ttlcache RangeBackwards method, we need to handle potential panic until it gets fixed + if r := recover(); r != nil { + e = fmt.Errorf("recovered from panic in deleteOldestFromCache: %v", r) + suc = false + + zap.L().Error("recovered from panic in deleteOldestFromCache", zap.Error(e)) + } + }() + + success := false + s.cache.RangeBackwards(func(item *ttlcache.Item[DiffStoreKey, Diff]) bool { + isDeleted := s.isBeingDeleted(item.Key()) + if isDeleted { + return true + } + + sfSize, err := item.Value().FileSize() + if err != nil { + zap.L().Warn("failed to get size of deleted item from cache", zap.Error(err)) + sfSize = fallbackDiffSize + } + + s.scheduleDelete(item.Key(), sfSize) + + success = true + return false + }) + + return success, e +} + +func (s *DiffStore) resetDelete(key DiffStoreKey) { + s.pdMu.Lock() + defer s.pdMu.Unlock() + + dDiff, f := s.pdSizes[key] + if !f { + return + } + + close(dDiff.cancel) + delete(s.pdSizes, key) +} + +func (s *DiffStore) isBeingDeleted(key DiffStoreKey) bool { + s.pdMu.RLock() + defer s.pdMu.RUnlock() + + _, f := s.pdSizes[key] + return f +} + +func (s *DiffStore) scheduleDelete(key DiffStoreKey, dSize int64) { + s.pdMu.Lock() + defer s.pdMu.Unlock() + + cancelCh := make(chan struct{}) + s.pdSizes[key] = &deleteDiff{ + size: dSize, + cancel: cancelCh, + } + + // Delay cache (file close/removal) deletion, + // this is to prevent race conditions with exposed slices, + // pending data fetching, or data upload + go (func() { + select { + case 
<-s.ctx.Done(): + case <-cancelCh: + case <-time.After(s.pdDelay): + s.cache.Delete(key) + } + })() +} + +func diskUsage(path string) (uint64, uint64, error) { + var stat unix.Statfs_t + err := unix.Statfs(path, &stat) + if err != nil { + return 0, 0, fmt.Errorf("failed to get disk stats for path %s: %w", path, err) + } + + // Available blocks * size per block = available space in bytes + free := stat.Bavail * uint64(stat.Bsize) + total := stat.Blocks * uint64(stat.Bsize) + used := total - free - s.cache.Set(storagePath, d, buildExpiration) + return used, total, nil } diff --git a/packages/orchestrator/internal/sandbox/build/cache_test.go b/packages/orchestrator/internal/sandbox/build/cache_test.go new file mode 100644 index 0000000..dd2e10e --- /dev/null +++ b/packages/orchestrator/internal/sandbox/build/cache_test.go @@ -0,0 +1,278 @@ +package build + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + tmpBuildCachePrefix = "test-build-cache_" + + blockSize = int64(1024) +) + +func newDiff(t *testing.T, cachePath, buildId string, diffType DiffType, blockSize int64) Diff { + localDiff, err := NewLocalDiffFile(cachePath, buildId, diffType) + assert.NoError(t, err) + + // Write 100 bytes to the file + n, err := localDiff.WriteAt(make([]byte, 100), 0) + assert.NoError(t, err) + assert.Equal(t, 100, n) + + diff, err := localDiff.CloseToDiff(blockSize) + assert.NoError(t, err) + + return diff +} + +func createTempDir(t *testing.T) string { + tempDir, err := os.MkdirTemp("", tmpBuildCachePrefix) + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + t.Cleanup(func() { + os.RemoveAll(tempDir) + }) + + t.Logf("Temp dir: %s\n", tempDir) + return tempDir +} + +func TestNewDiffStore(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + store, err := NewDiffStore( + ctx, + cachePath, + 25*time.Hour, + 60*time.Second, + 90.0, + ) 
+ t.Cleanup(store.Close) + + assert.NoError(t, err) + assert.NotNil(t, store) +} + +func TestDiffStoreTTLEviction(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ttl := 1 * time.Second + delay := 60 * time.Second + store, err := NewDiffStore( + ctx, + cachePath, + ttl, + delay, + 100.0, + ) + t.Cleanup(store.Close) + assert.NoError(t, err) + + // Add an item to the cache + diff := newDiff(t, cachePath, "build-test-id", Rootfs, blockSize) + + // Add an item to the cache + store.Add(diff) + + // Expire diff + time.Sleep(ttl + time.Second) + + found := store.Has(diff) + assert.False(t, found) +} + +func TestDiffStoreRefreshTTLEviction(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ttl := 1 * time.Second + delay := 60 * time.Second + store, err := NewDiffStore( + ctx, + cachePath, + ttl, + delay, + 100.0, + ) + t.Cleanup(store.Close) + assert.NoError(t, err) + + // Add an item to the cache + diff := newDiff(t, cachePath, "build-test-id", Rootfs, blockSize) + + // Add an item to the cache + store.Add(diff) + + // Refresh diff expiration + time.Sleep(ttl / 2) + _, err = store.Get(diff) + assert.NoError(t, err) + + // Try to expire diff + time.Sleep(ttl/2 + time.Microsecond) + + // Is still in cache + found2 := store.Has(diff) + assert.True(t, found2) +} + +func TestDiffStoreDelayEviction(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ttl := 60 * time.Second + delay := 4 * time.Second + store, err := NewDiffStore( + ctx, + cachePath, + ttl, + delay, + 0.0, + ) + t.Cleanup(store.Close) + assert.NoError(t, err) + + // Add an item to the cache + diff := newDiff(t, cachePath, "build-test-id", Rootfs, blockSize) + + // Add an item to the cache + store.Add(diff) + + // Wait for removal trigger of diff + time.Sleep(2 * time.Second) + + 
// Verify still in cache + found := store.Has(diff) + assert.True(t, found) + dFound := store.isBeingDeleted(diff.CacheKey()) + assert.True(t, dFound) + + // Wait for complete removal of diff + time.Sleep(delay) + + found = store.Has(diff) + assert.False(t, found) + dFound = store.isBeingDeleted(diff.CacheKey()) + assert.False(t, dFound) +} + +func TestDiffStoreDelayEvictionAbort(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ttl := 60 * time.Second + delay := 4 * time.Second + store, err := NewDiffStore( + ctx, + cachePath, + ttl, + delay, + 0.0, + ) + t.Cleanup(store.Close) + assert.NoError(t, err) + + // Add an item to the cache + diff := newDiff(t, cachePath, "build-test-id", Rootfs, blockSize) + + // Add an item to the cache + store.Add(diff) + + // Wait for removal trigger of diff + time.Sleep(delay / 2) + + // Verify still in cache + found := store.Has(diff) + assert.True(t, found) + dFound := store.isBeingDeleted(diff.CacheKey()) + assert.True(t, dFound) + + // Abort removal of diff + _, err = store.Get(diff) + assert.NoError(t, err) + + found = store.Has(diff) + assert.True(t, found) + dFound = store.isBeingDeleted(diff.CacheKey()) + assert.False(t, dFound) + + // Check insufficient delay cancellation of diff and verify it's still in the cache + // after the delay period + time.Sleep(delay/2 + time.Second) + found = store.Has(diff) + assert.True(t, found) +} + +func TestDiffStoreOldestFromCache(t *testing.T) { + cachePath := createTempDir(t) + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + ttl := 60 * time.Second + delay := 4 * time.Second + store, err := NewDiffStore( + ctx, + cachePath, + ttl, + delay, + 100.0, + ) + t.Cleanup(store.Close) + assert.NoError(t, err) + + // Add items to the cache + diff := newDiff(t, cachePath, "build-test-id", Rootfs, blockSize) + store.Add(diff) + diff2 := newDiff(t, cachePath, "build-test-id-2", Rootfs, 
blockSize) + store.Add(diff2) + + found := store.Has(diff) + assert.True(t, found) + + // Delete oldest item + _, err = store.deleteOldestFromCache() + assert.NoError(t, err) + + assert.True(t, store.isBeingDeleted(diff.CacheKey())) + // Wait for removal trigger of diff + time.Sleep(delay + time.Second) + + // Verify oldest item is deleted + found = store.Has(diff) + assert.False(t, found) + + found = store.Has(diff2) + assert.True(t, found) + + // Add another item to the cache + diff3 := newDiff(t, cachePath, "build-test-id-3", Rootfs, blockSize) + store.Add(diff3) + + // Delete oldest item + _, err = store.deleteOldestFromCache() + assert.NoError(t, err) + + assert.True(t, store.isBeingDeleted(diff2.CacheKey())) + // Wait for removal trigger of diff + time.Sleep(delay + time.Second) + + // Verify oldest item is deleted + found = store.Has(diff2) + assert.False(t, found) + + found = store.Has(diff3) + assert.True(t, found) +} diff --git a/packages/orchestrator/internal/sandbox/build/diff.go b/packages/orchestrator/internal/sandbox/build/diff.go index 9c1797c..08ad0b4 100644 --- a/packages/orchestrator/internal/sandbox/build/diff.go +++ b/packages/orchestrator/internal/sandbox/build/diff.go @@ -1,6 +1,7 @@ package build import ( + "context" "io" "github.com/e2b-dev/infra/packages/shared/pkg/storage" @@ -23,7 +24,10 @@ type Diff interface { io.Closer io.ReaderAt Slice(off, length int64) ([]byte, error) + CacheKey() DiffStoreKey CachePath() (string, error) + FileSize() (int64, error) + Init(ctx context.Context) error } type NoDiff struct{} @@ -43,3 +47,15 @@ func (n *NoDiff) Close() error { func (n *NoDiff) ReadAt(p []byte, off int64) (int, error) { return 0, ErrNoDiff{} } + +func (n *NoDiff) FileSize() (int64, error) { + return 0, ErrNoDiff{} +} + +func (n *NoDiff) CacheKey() DiffStoreKey { + return "" +} + +func (n *NoDiff) Init(ctx context.Context) error { + return ErrNoDiff{} +} diff --git a/packages/orchestrator/internal/sandbox/build/local_diff.go 
b/packages/orchestrator/internal/sandbox/build/local_diff.go index e4355e0..970f7bf 100644 --- a/packages/orchestrator/internal/sandbox/build/local_diff.go +++ b/packages/orchestrator/internal/sandbox/build/local_diff.go @@ -1,6 +1,7 @@ package build import ( + "context" "fmt" "os" "path/filepath" @@ -12,16 +13,18 @@ import ( type LocalDiffFile struct { *os.File cachePath string + cacheKey DiffStoreKey } func NewLocalDiffFile( + basePath string, buildId string, diffType DiffType, ) (*LocalDiffFile, error) { cachePathSuffix := id.Generate() cacheFile := fmt.Sprintf("%s-%s-%s", buildId, diffType, cachePathSuffix) - cachePath := filepath.Join(cachePath, cacheFile) + cachePath := filepath.Join(basePath, cacheFile) f, err := os.OpenFile(cachePath, os.O_RDWR|os.O_CREATE, 0o644) if err != nil { @@ -31,10 +34,11 @@ func NewLocalDiffFile( return &LocalDiffFile{ File: f, cachePath: cachePath, + cacheKey: GetDiffStoreKey(buildId, diffType), }, nil } -func (f *LocalDiffFile) ToDiff( +func (f *LocalDiffFile) CloseToDiff( blockSize int64, ) (Diff, error) { defer f.Close() @@ -53,17 +57,24 @@ func (f *LocalDiffFile) ToDiff( return &NoDiff{}, nil } - return newLocalDiff(f.cachePath, size.Size(), blockSize) + return newLocalDiff( + f.cacheKey, + f.cachePath, + size.Size(), + blockSize, + ) } type localDiff struct { size int64 blockSize int64 + cacheKey DiffStoreKey cachePath string cache *block.Cache } func newLocalDiff( + cacheKey DiffStoreKey, cachePath string, size int64, blockSize int64, @@ -76,6 +87,7 @@ func newLocalDiff( return &localDiff{ size: size, blockSize: blockSize, + cacheKey: cacheKey, cachePath: cachePath, cache: cache, }, nil @@ -96,3 +108,15 @@ func (b *localDiff) ReadAt(p []byte, off int64) (int, error) { func (b *localDiff) Slice(off, length int64) ([]byte, error) { return b.cache.Slice(off, length) } + +func (b *localDiff) FileSize() (int64, error) { + return b.cache.FileSize() +} + +func (b *localDiff) CacheKey() DiffStoreKey { + return b.cacheKey +} + +func 
(b *localDiff) Init(ctx context.Context) error { + return nil +} diff --git a/packages/orchestrator/internal/sandbox/build/storage_diff.go b/packages/orchestrator/internal/sandbox/build/storage_diff.go index f1da51e..8ed6351 100644 --- a/packages/orchestrator/internal/sandbox/build/storage_diff.go +++ b/packages/orchestrator/internal/sandbox/build/storage_diff.go @@ -8,7 +8,7 @@ import ( "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" "github.com/e2b-dev/infra/packages/shared/pkg/id" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" + storage "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/utils" ) @@ -19,51 +19,56 @@ func storagePath(buildId string, diffType DiffType) string { type StorageDiff struct { chunker *utils.SetOnce[*block.Chunker] cachePath string + cacheKey DiffStoreKey storagePath string blockSize int64 + persistence storage.StorageProvider } func newStorageDiff( + basePath string, buildId string, diffType DiffType, blockSize int64, + persistence storage.StorageProvider, ) *StorageDiff { cachePathSuffix := id.Generate() storagePath := storagePath(buildId, diffType) cacheFile := fmt.Sprintf("%s-%s-%s", buildId, diffType, cachePathSuffix) - cachePath := filepath.Join(cachePath, cacheFile) + cachePath := filepath.Join(basePath, cacheFile) return &StorageDiff{ storagePath: storagePath, cachePath: cachePath, chunker: utils.NewSetOnce[*block.Chunker](), blockSize: blockSize, + persistence: persistence, + cacheKey: GetDiffStoreKey(buildId, diffType), } } -func (b *StorageDiff) CacheKey() string { - return b.storagePath +func (b *StorageDiff) CacheKey() DiffStoreKey { + return b.cacheKey } -func (b *StorageDiff) Init(ctx context.Context, bucket *s3.BucketHandle) error { - obj := s3.NewObject(ctx, bucket, b.storagePath) +func (b *StorageDiff) Init(ctx context.Context) error { + obj, err := b.persistence.OpenObject(ctx, b.storagePath) + if err != nil { + return err + } size, 
err := obj.Size() if err != nil { errMsg := fmt.Errorf("failed to get object size: %w", err) - b.chunker.SetError(errMsg) - return errMsg } chunker, err := block.NewChunker(ctx, size, b.blockSize, obj, b.cachePath) if err != nil { errMsg := fmt.Errorf("failed to create chunker: %w", err) - b.chunker.SetError(errMsg) - return errMsg } @@ -110,3 +115,12 @@ func (b *StorageDiff) WriteTo(w io.Writer) (int64, error) { func (b *StorageDiff) CachePath() (string, error) { return b.cachePath, nil } + +func (b *StorageDiff) FileSize() (int64, error) { + c, err := b.chunker.Wait() + if err != nil { + return 0, err + } + + return c.FileSize() +} diff --git a/packages/orchestrator/internal/sandbox/checks.go b/packages/orchestrator/internal/sandbox/checks.go index 57b4544..f118840 100644 --- a/packages/orchestrator/internal/sandbox/checks.go +++ b/packages/orchestrator/internal/sandbox/checks.go @@ -2,89 +2,103 @@ package sandbox import ( "context" - "fmt" - "io" - "net/http" + "errors" + "sync/atomic" "time" - "github.com/e2b-dev/infra/packages/shared/pkg/consts" - "github.com/e2b-dev/infra/packages/shared/pkg/utils" - "golang.org/x/mod/semver" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" ) const ( - healthCheckInterval = 20 * time.Second - metricsCheckInterval = 60 * time.Second - minEnvdVersionForMetrcis = "0.1.5" + healthCheckInterval = 20 * time.Second + healthCheckTimeout = 100 * time.Millisecond ) -func (s *Sandbox) logHeathAndUsage(ctx *utils.LockableCancelableContext) { +type Checks struct { + sandbox *Sandbox + + ctx context.Context + cancelCtx context.CancelCauseFunc + + healthy atomic.Bool + + UseClickhouseMetrics bool +} + +var ErrChecksStopped = errors.New("checks stopped") + +func NewChecks(ctx context.Context, tracer trace.Tracer, sandbox *Sandbox, useClickhouseMetrics bool) (*Checks, error) { + _, childSpan := tracer.Start(ctx, "checks-create") + defer childSpan.End() + + // 
Create background context, passed ctx is from create/resume request and will be canceled after the request is processed. + ctx, cancel := context.WithCancelCause(context.Background()) + h := &Checks{ + sandbox: sandbox, + ctx: ctx, + cancelCtx: cancel, + healthy: atomic.Bool{}, // defaults to `false` + UseClickhouseMetrics: useClickhouseMetrics, + } + // By default, the sandbox should be healthy, if the status change we report it. + h.healthy.Store(true) + return h, nil +} + +func (c *Checks) Start() { + c.logHealth() +} + +func (c *Checks) Stop() { + c.cancelCtx(ErrChecksStopped) +} + +func (c *Checks) logHealth() { healthTicker := time.NewTicker(healthCheckInterval) - metricsTicker := time.NewTicker(metricsCheckInterval) defer func() { healthTicker.Stop() - metricsTicker.Stop() }() - // Get metrics on sandbox startup - go s.LogMetrics(ctx) + // Get metrics and health status on sandbox startup + go c.Healthcheck(false) for { select { case <-healthTicker.C: - childCtx, cancel := context.WithTimeout(ctx, time.Second) - - ctx.Lock() - s.Healthcheck(childCtx, false) - ctx.Unlock() - - cancel() - case <-metricsTicker.C: - s.LogMetrics(ctx) - case <-ctx.Done(): + c.Healthcheck(false) + case <-c.ctx.Done(): return } } } -func (s *Sandbox) Healthcheck(ctx context.Context, alwaysReport bool) { - var err error - defer func() { - s.Logger.Healthcheck(err == nil, alwaysReport) - }() - - address := fmt.Sprintf("http://%s:%d/health", s.Slot.HostIP(), consts.DefaultEnvdServerPort) - - request, err := http.NewRequestWithContext(ctx, "GET", address, nil) - if err != nil { - return - } - - response, err := httpClient.Do(request) - if err != nil { +func (c *Checks) Healthcheck(alwaysReport bool) { + ok, err := c.GetHealth(healthCheckTimeout) + // Sandbox stopped + if errors.Is(err, ErrChecksStopped) { return } - defer response.Body.Close() - if response.StatusCode != http.StatusNoContent { - err = fmt.Errorf("unexpected status code: %d", response.StatusCode) + if !ok && 
c.healthy.CompareAndSwap(true, false) { + sbxlogger.E(c.sandbox).Healthcheck(sbxlogger.Fail) + sbxlogger.I(c.sandbox).Error("healthcheck failed", zap.Error(err)) return } - _, err = io.Copy(io.Discard, response.Body) - if err != nil { + if ok && c.healthy.CompareAndSwap(false, true) { + sbxlogger.E(c.sandbox).Healthcheck(sbxlogger.Success) return } -} -func isGTEVersion(curVersion, minVersion string) bool { - if len(curVersion) > 0 && curVersion[0] != 'v' { - curVersion = "v" + curVersion - } - - if !semver.IsValid(curVersion) { - return false + if alwaysReport { + if ok { + sbxlogger.E(c.sandbox).Healthcheck(sbxlogger.ReportSuccess) + } else { + sbxlogger.E(c.sandbox).Healthcheck(sbxlogger.ReportFail) + sbxlogger.I(c.sandbox).Error("control healthcheck failed", zap.Error(err)) + } } - - return semver.Compare(curVersion, minVersion) >= 0 } diff --git a/packages/orchestrator/internal/sandbox/cleanup.go b/packages/orchestrator/internal/sandbox/cleanup.go index 43dd7e0..1c9ba86 100644 --- a/packages/orchestrator/internal/sandbox/cleanup.go +++ b/packages/orchestrator/internal/sandbox/cleanup.go @@ -1,50 +1,80 @@ package sandbox import ( + "context" "errors" "fmt" "os" "sync" + "sync/atomic" + + "go.uber.org/zap" "github.com/e2b-dev/infra/packages/shared/pkg/storage" ) type Cleanup struct { - cleanup []func() error - priorityCleanup []func() error + cleanup []func(ctx context.Context) error + priorityCleanup []func(ctx context.Context) error error error once sync.Once + + hasRun atomic.Bool + mu sync.Mutex } func NewCleanup() *Cleanup { return &Cleanup{} } -func (c *Cleanup) Add(f func() error) { +func (c *Cleanup) Add(f func(ctx context.Context) error) { + if c.hasRun.Load() == true { + zap.L().Error("Add called after cleanup has run, ignoring function") + return + } + + c.mu.Lock() + defer c.mu.Unlock() + c.cleanup = append(c.cleanup, f) } -func (c *Cleanup) AddPriority(f func() error) { +func (c *Cleanup) AddPriority(f func(ctx context.Context) error) { + if 
c.hasRun.Load() == true { + zap.L().Error("AddPriority called after cleanup has run, ignoring function") + return + } + + c.mu.Lock() + defer c.mu.Unlock() + c.priorityCleanup = append(c.priorityCleanup, f) } -func (c *Cleanup) Run() error { - c.once.Do(c.run) +func (c *Cleanup) Run(ctx context.Context) error { + c.once.Do(func() { + c.run(ctx) + }) return c.error } -func (c *Cleanup) run() { +func (c *Cleanup) run(ctx context.Context) { + c.hasRun.Store(true) + + c.mu.Lock() + defer c.mu.Unlock() + var errs []error for i := len(c.priorityCleanup) - 1; i >= 0; i-- { - err := c.priorityCleanup[i]() + err := c.priorityCleanup[i](ctx) if err != nil { errs = append(errs, err) } } for i := len(c.cleanup) - 1; i >= 0; i-- { - err := c.cleanup[i]() + err := c.cleanup[i](ctx) if err != nil { errs = append(errs, err) } diff --git a/packages/orchestrator/internal/sandbox/diffcreator.go b/packages/orchestrator/internal/sandbox/diffcreator.go new file mode 100644 index 0000000..b274cc5 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/diffcreator.go @@ -0,0 +1,61 @@ +package sandbox + +import ( + "context" + "errors" + "fmt" + "io" + "os" + + "github.com/bits-and-blooms/bitset" + "go.opentelemetry.io/otel/trace" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/rootfs" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" +) + +type DiffCreator interface { + process(ctx context.Context, out io.Writer) (*header.DiffMetadata, error) +} + +type RootfsDiffCreator struct { + rootfs rootfs.Provider + stopHook func(context.Context) error +} + +func (r *RootfsDiffCreator) process(ctx context.Context, out io.Writer) (*header.DiffMetadata, error) { + return r.rootfs.ExportDiff(ctx, out, r.stopHook) +} + +type MemoryDiffCreator struct { + tracer trace.Tracer + memfile *storage.TemporaryMemfile + dirtyPages *bitset.BitSet + blockSize int64 + doneHook func(context.Context) error +} + +func (r 
*MemoryDiffCreator) process(ctx context.Context, out io.Writer) (h *header.DiffMetadata, e error) { + defer func() { + err := r.doneHook(ctx) + if err != nil { + e = errors.Join(e, err) + } + }() + + memfileSource, err := os.Open(r.memfile.Path()) + if err != nil { + return nil, fmt.Errorf("failed to open memfile: %w", err) + } + defer memfileSource.Close() + + return header.WriteDiffWithTrace( + ctx, + r.tracer, + memfileSource, + r.blockSize, + r.dirtyPages, + out, + ) +} diff --git a/packages/orchestrator/internal/sandbox/envd.go b/packages/orchestrator/internal/sandbox/envd.go index dc5a880..bf7b932 100644 --- a/packages/orchestrator/internal/sandbox/envd.go +++ b/packages/orchestrator/internal/sandbox/envd.go @@ -14,87 +14,66 @@ import ( "github.com/e2b-dev/infra/packages/shared/pkg/consts" ) -const maxRetries = 120 - -func (s *Sandbox) syncOldEnvd(ctx context.Context) error { - address := fmt.Sprintf("http://%s:%d/sync", s.Slot.HostIP(), consts.OldEnvdServerPort) +const ( + requestTimeout = 50 * time.Millisecond + loopDelay = 5 * time.Millisecond +) - var response *http.Response - for i := 0; i < maxRetries; i++ { - reqCtx, cancel := context.WithTimeout(ctx, 50*time.Millisecond) - request, err := http.NewRequestWithContext(reqCtx, "POST", address, nil) +// doRequestWithInfiniteRetries does a request with infinite retries until the context is done. +// The parent context should have a deadline or a timeout. 
+func doRequestWithInfiniteRetries(ctx context.Context, method, address string, requestBody []byte, accessToken *string) (*http.Response, error) { + for { + reqCtx, cancel := context.WithTimeout(ctx, requestTimeout) + request, err := http.NewRequestWithContext(reqCtx, method, address, bytes.NewReader(requestBody)) if err != nil { cancel() - return err + return nil, err } - response, err = httpClient.Do(request) - if err == nil { - cancel() - break + // make sure request to already authorized envd will not fail + // this can happen in sandbox resume and in some edge cases when previous request was success, but we continued + if accessToken != nil { + request.Header.Set("X-Access-Token", *accessToken) } + response, err := httpClient.Do(request) cancel() - time.Sleep(5 * time.Millisecond) - } - - if response == nil { - return fmt.Errorf("failed to sync envd") - } - _, err := io.Copy(io.Discard, response.Body) - if err != nil { - return err - } + if err == nil { + return response, nil + } - err = response.Body.Close() - if err != nil { - return err + select { + case <-ctx.Done(): + return nil, fmt.Errorf("%w with cause: %w", ctx.Err(), context.Cause(ctx)) + case <-time.After(loopDelay): + } } - - return nil } type PostInitJSONBody struct { - EnvVars *map[string]string `json:"envVars"` + EnvVars *map[string]string `json:"envVars"` + AccessToken *string `json:"accessToken,omitempty"` } -func (s *Sandbox) initEnvd(ctx context.Context, tracer trace.Tracer, envVars map[string]string) error { +func (s *Sandbox) initEnvd(ctx context.Context, tracer trace.Tracer, envVars map[string]string, accessToken *string) error { childCtx, childSpan := tracer.Start(ctx, "envd-init") defer childSpan.End() - address := fmt.Sprintf("http://%s:%d/init", s.Slot.HostIP(), consts.DefaultEnvdServerPort) - + address := fmt.Sprintf("http://%s:%d/init", s.Slot.HostIPString(), consts.DefaultEnvdServerPort) jsonBody := &PostInitJSONBody{ - EnvVars: &envVars, + EnvVars: &envVars, + AccessToken: 
accessToken, } - envVarsJSON, err := json.Marshal(jsonBody) + body, err := json.Marshal(jsonBody) if err != nil { return err } - var response *http.Response - for i := 0; i < maxRetries; i++ { - reqCtx, cancel := context.WithTimeout(childCtx, 50*time.Millisecond) - request, err := http.NewRequestWithContext(reqCtx, "POST", address, bytes.NewReader(envVarsJSON)) - if err != nil { - cancel() - return err - } - - response, err = httpClient.Do(request) - if err == nil { - cancel() - break - } - - cancel() - time.Sleep(5 * time.Millisecond) - } - - if response == nil { - return fmt.Errorf("failed to init envd") + response, err := doRequestWithInfiniteRetries(childCtx, "POST", address, body, accessToken) + if err != nil { + return fmt.Errorf("failed to init envd: %w", err) } defer response.Body.Close() diff --git a/packages/orchestrator/internal/sandbox/fc/client.go b/packages/orchestrator/internal/sandbox/fc/client_linux.go similarity index 51% rename from packages/orchestrator/internal/sandbox/fc/client.go rename to packages/orchestrator/internal/sandbox/fc/client_linux.go index 6a8d59f..affb4b5 100644 --- a/packages/orchestrator/internal/sandbox/fc/client.go +++ b/packages/orchestrator/internal/sandbox/fc/client_linux.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + package fc import ( @@ -114,6 +117,7 @@ func (c *apiClient) createSnapshot( snapshotConfig := operations.CreateSnapshotParams{ Context: ctx, Body: &models.SnapshotCreateParams{ + SnapshotType: models.SnapshotCreateParamsSnapshotTypeFull, MemFilePath: &memfilePath, SnapshotPath: &snapfilePath, }, @@ -140,3 +144,123 @@ func (c *apiClient) setMmds(ctx context.Context, metadata *MmdsMetadata) error { return nil } + +func (c *apiClient) setBootSource(ctx context.Context, kernelArgs string, kernelPath string) error { + bootSourceConfig := operations.PutGuestBootSourceParams{ + Context: ctx, + Body: &models.BootSource{ + BootArgs: kernelArgs, + KernelImagePath: &kernelPath, + }, + } + + _, err := 
c.client.Operations.PutGuestBootSource(&bootSourceConfig) + if err != nil { + return fmt.Errorf("error setting fc boot source config: %w", err) + } + + return nil +} + +func (c *apiClient) setRootfsDrive(ctx context.Context, rootfsPath string) error { + rootfs := "rootfs" + ioEngine := "Async" + isRootDevice := true + driversConfig := operations.PutGuestDriveByIDParams{ + Context: ctx, + DriveID: rootfs, + Body: &models.Drive{ + DriveID: &rootfs, + PathOnHost: rootfsPath, + IsRootDevice: &isRootDevice, + IsReadOnly: false, + IoEngine: &ioEngine, + }, + } + + _, err := c.client.Operations.PutGuestDriveByID(&driversConfig) + if err != nil { + return fmt.Errorf("error setting fc drivers config: %w", err) + } + + return nil +} + +func (c *apiClient) setNetworkInterface(ctx context.Context, ifaceID string, tapName string, tapMac string) error { + networkConfig := operations.PutGuestNetworkInterfaceByIDParams{ + Context: ctx, + IfaceID: ifaceID, + Body: &models.NetworkInterface{ + IfaceID: &ifaceID, + GuestMac: tapMac, + HostDevName: &tapName, + }, + } + + _, err := c.client.Operations.PutGuestNetworkInterfaceByID(&networkConfig) + if err != nil { + return fmt.Errorf("error setting fc network config: %w", err) + } + + mmdsVersion := "V2" + mmdsConfig := operations.PutMmdsConfigParams{ + Context: ctx, + Body: &models.MmdsConfig{ + Version: &mmdsVersion, + NetworkInterfaces: []string{ifaceID}, + }, + } + + _, err = c.client.Operations.PutMmdsConfig(&mmdsConfig) + if err != nil { + return fmt.Errorf("error setting network mmds data: %w", err) + } + + return nil +} + +func (c *apiClient) setMachineConfig( + ctx context.Context, + vCPUCount int64, + memoryMB int64, + hugePages bool, +) error { + smt := true + trackDirtyPages := false + machineConfig := &models.MachineConfiguration{ + VcpuCount: &vCPUCount, + MemSizeMib: &memoryMB, + Smt: &smt, + TrackDirtyPages: &trackDirtyPages, + } + if hugePages { + machineConfig.HugePages = models.MachineConfigurationHugePagesNr2M + } + 
machineConfigParams := operations.PutMachineConfigurationParams{ + Context: ctx, + Body: machineConfig, + } + + _, err := c.client.Operations.PutMachineConfiguration(&machineConfigParams) + if err != nil { + return fmt.Errorf("error setting fc machine config: %w", err) + } + return nil +} + +func (c *apiClient) startVM(ctx context.Context) error { + start := models.InstanceActionInfoActionTypeInstanceStart + startActionParams := operations.CreateSyncActionParams{ + Context: ctx, + Info: &models.InstanceActionInfo{ + ActionType: &start, + }, + } + + _, err := c.client.Operations.CreateSyncAction(&startActionParams) + if err != nil { + return fmt.Errorf("error starting fc: %w", err) + } + + return nil +} diff --git a/packages/orchestrator/internal/sandbox/fc/client_other.go b/packages/orchestrator/internal/sandbox/fc/client_other.go new file mode 100644 index 0000000..9eee2cd --- /dev/null +++ b/packages/orchestrator/internal/sandbox/fc/client_other.go @@ -0,0 +1,36 @@ +//go:build !linux +// +build !linux + +package fc + +import ( + "context" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" +) + +type apiClient struct{} + +func newApiClient(socketPath string) *apiClient { + return nil +} + +func (c *apiClient) loadSnapshot(ctx context.Context, uffdSocketPath string, uffdReady chan struct{}, snapfile template.File) error { + return nil +} + +func (c *apiClient) resumeVM(ctx context.Context) error { + return nil +} + +func (c *apiClient) setMmds(ctx context.Context, metadata *MmdsMetadata) error { + return nil +} + +func (c *apiClient) pauseVM(ctx context.Context) error { + return nil +} + +func (c *apiClient) createSnapshot(ctx context.Context, snapfilePath string, memfilePath string) error { + return nil +} diff --git a/packages/orchestrator/internal/sandbox/fc/kernel_args.go b/packages/orchestrator/internal/sandbox/fc/kernel_args.go new file mode 100644 index 0000000..0ff6ac8 --- /dev/null +++ 
b/packages/orchestrator/internal/sandbox/fc/kernel_args.go @@ -0,0 +1,22 @@ +package fc + +import ( + "fmt" + "sort" + "strings" +) + +type KernelArgs map[string]string + +func (ka KernelArgs) String() string { + args := make([]string, 0, len(ka)) + for k, v := range ka { + if v == "" { + args = append(args, k) + } else { + args = append(args, fmt.Sprintf("%s=%s", k, v)) + } + } + sort.Strings(args) // optional: for consistent output + return strings.Join(args, " ") +} diff --git a/packages/orchestrator/internal/sandbox/fc/process.go b/packages/orchestrator/internal/sandbox/fc/process.go index 8aef293..c91d367 100644 --- a/packages/orchestrator/internal/sandbox/fc/process.go +++ b/packages/orchestrator/internal/sandbox/fc/process.go @@ -1,11 +1,11 @@ package fc import ( - "bufio" "bytes" "context" "errors" "fmt" + "io" "os" "os/exec" "syscall" @@ -13,14 +13,16 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "go.uber.org/zap/zapio" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/rootfs" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/socket" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" - "github.com/e2b-dev/infra/packages/shared/pkg/logs" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" ) const startScript = `mount --make-rprivate / && @@ -32,38 +34,48 @@ ip netns exec {{ .namespaceID }} {{ .firecrackerPath }} --api-sock {{ .firecrack var startScriptTemplate = txtTemplate.Must(txtTemplate.New("fc-start").Parse(startScript)) -type Process struct { - uffdReady chan struct{} - snapfile template.File +type ProcessOptions struct { + // InitScriptPath is the path to the init script that will 
be executed inside the VM on kernel start. + InitScriptPath string - cmd *exec.Cmd + // KernelLogs is a flag to enable kernel logs output to the process stdout. + KernelLogs bool + // SystemdToKernelLogs is a flag to enable systemd logs output to the console. + // It enabled the kernel logs by default too. + SystemdToKernelLogs bool - metadata *MmdsMetadata + // Stdout is the writer to which the process stdout will be written. + Stdout io.Writer + // Stderr is the writer to which the process stderr will be written. + Stderr io.Writer +} + +type Process struct { + cmd *exec.Cmd - uffdSocketPath string firecrackerSocketPath string - rootfs *rootfs.CowDevice - files *storage.SandboxFiles + slot *network.Slot + rootfsPath string + files *storage.SandboxFiles Exit chan error client *apiClient + + buildRootfsPath string } func NewProcess( ctx context.Context, tracer trace.Tracer, - slot network.Slot, + slot *network.Slot, files *storage.SandboxFiles, - mmdsMetadata *MmdsMetadata, - snapfile template.File, - rootfs *rootfs.CowDevice, - uffdReady chan struct{}, + rootfsPath string, baseTemplateID string, + baseBuildID string, ) (*Process, error) { childCtx, childSpan := tracer.Start(ctx, "initialize-fc", trace.WithAttributes( - attribute.String("sandbox.id", mmdsMetadata.SandboxId), attribute.Int("sandbox.slot.index", slot.Idx), )) defer childSpan.End() @@ -72,17 +84,17 @@ func NewProcess( baseBuild := storage.NewTemplateFiles( baseTemplateID, - rootfs.BaseBuildId, + baseBuildID, files.KernelVersion, files.FirecrackerVersion, - files.Hugepages(), ) + buildRootfsPath := baseBuild.SandboxRootfsPath() err := startScriptTemplate.Execute(&fcStartScript, map[string]interface{}{ "rootfsPath": files.SandboxCacheRootfsLinkPath(), "kernelPath": files.CacheKernelPath(), - "buildDir": baseBuild.BuildDir(), - "buildRootfsPath": baseBuild.BuildRootfsPath(), + "buildDir": baseBuild.SandboxBuildDir(), + "buildRootfsPath": buildRootfsPath, "buildKernelPath": files.BuildKernelPath(), 
"buildKernelDir": files.BuildKernelDir(), "namespaceID": slot.NamespaceID(), @@ -97,6 +109,16 @@ func NewProcess( attribute.String("sandbox.cmd", fcStartScript.String()), ) + _, err = os.Stat(files.FirecrackerPath()) + if err != nil { + return nil, fmt.Errorf("error stating firecracker binary: %w", err) + } + + _, err = os.Stat(files.CacheKernelPath()) + if err != nil { + return nil, fmt.Errorf("error stating kernel file: %w", err) + } + cmd := exec.Command( "unshare", "-pfm", @@ -113,70 +135,50 @@ func NewProcess( return &Process{ Exit: make(chan error, 1), - uffdReady: uffdReady, cmd: cmd, firecrackerSocketPath: files.SandboxFirecrackerSocketPath(), - metadata: mmdsMetadata, - uffdSocketPath: files.SandboxUffdSocketPath(), - snapfile: snapfile, client: newApiClient(files.SandboxFirecrackerSocketPath()), - rootfs: rootfs, + rootfsPath: rootfsPath, files: files, + slot: slot, + + buildRootfsPath: buildRootfsPath, }, nil } -func (p *Process) Start( +func (p *Process) configure( ctx context.Context, tracer trace.Tracer, - logger *logs.SandboxLogger, + sandboxID string, + templateID string, + teamID string, + stdoutExternal io.Writer, + stderrExternal io.Writer, ) error { - childCtx, childSpan := tracer.Start(ctx, "start-fc") + childCtx, childSpan := tracer.Start(ctx, "configure-fc") defer childSpan.End() - stdoutReader, err := p.cmd.StdoutPipe() - if err != nil { - return fmt.Errorf("error creating fc stdout pipe: %w", err) + sbxMetadata := sbxlogger.SandboxMetadata{ + SandboxID: sandboxID, + TemplateID: templateID, + TeamID: teamID, } - go func() { - // The stdout should be closed with the process cmd automatically, as it uses the StdoutPipe() - // TODO: Better handling of processing all logs before calling wait - scanner := bufio.NewScanner(stdoutReader) - - for scanner.Scan() { - line := scanner.Text() - - logger.Infof("[sandbox %s]: stdout: %s\n", p.metadata.SandboxId, line) - } - - readerErr := scanner.Err() - if readerErr != nil { - logger.Errorf("[sandbox %s]: 
error reading fc stdout: %v\n", p.metadata.SandboxId, readerErr) - } - }() - - stderrReader, err := p.cmd.StderrPipe() - if err != nil { - return fmt.Errorf("error creating fc stderr pipe: %w", err) + stdoutWriter := &zapio.Writer{Log: sbxlogger.I(sbxMetadata).Logger, Level: zap.InfoLevel} + stdoutWriters := []io.Writer{stdoutWriter} + if stdoutExternal != nil { + stdoutWriters = append(stdoutWriters, stdoutExternal) } + p.cmd.Stdout = io.MultiWriter(stdoutWriters...) - go func() { - // The stderr should be closed with the process cmd automatically, as it uses the StderrPipe() - // TODO: Better handling of processing all logs before calling wait - scanner := bufio.NewScanner(stderrReader) - - for scanner.Scan() { - line := scanner.Text() - logger.Warnf("[sandbox %s]: stderr: %s\n", p.metadata.SandboxId, line) - } - - readerErr := scanner.Err() - if readerErr != nil { - logger.Errorf("[sandbox %s]: error reading fc stderr: %v\n", p.metadata.SandboxId, readerErr) - } - }() + stderrWriter := &zapio.Writer{Log: sbxlogger.I(sbxMetadata).Logger, Level: zap.ErrorLevel} + stderrWriters := []io.Writer{stderrWriter} + if stderrExternal != nil { + stderrWriters = append(stderrWriters, stderrExternal) + } + p.cmd.Stderr = io.MultiWriter(stderrWriters...) 
- err = os.Symlink("/dev/null", p.files.SandboxCacheRootfsLinkPath()) + err := utils.SymlinkForce("/dev/null", p.files.SandboxCacheRootfsLinkPath()) if err != nil { return fmt.Errorf("error symlinking rootfs: %w", err) } @@ -190,6 +192,9 @@ func (p *Process) Start( defer cancelStart(fmt.Errorf("fc finished starting")) go func() { + defer stderrWriter.Close() + defer stdoutWriter.Close() + waitErr := p.cmd.Wait() if waitErr != nil { var exitErr *exec.ExitError @@ -202,8 +207,9 @@ func (p *Process) Start( } } - errMsg := fmt.Errorf("error waiting for fc process: %w", waitErr) + zap.L().Error("error waiting for fc process", zap.Error(waitErr)) + errMsg := fmt.Errorf("error waiting for fc process: %w", waitErr) p.Exit <- errMsg cancelStart(errMsg) @@ -224,26 +230,160 @@ func (p *Process) Start( return errors.Join(errMsg, fcStopErr) } - device, err := p.rootfs.Path() + return nil +} + +func (p *Process) Create( + ctx context.Context, + tracer trace.Tracer, + sandboxID string, + templateID string, + teamID string, + vCPUCount int64, + memoryMB int64, + hugePages bool, + options ProcessOptions, +) error { + childCtx, childSpan := tracer.Start(ctx, "create-fc") + defer childSpan.End() + + err := p.configure( + childCtx, + tracer, + sandboxID, + templateID, + teamID, + options.Stdout, + options.Stderr, + ) + if err != nil { + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error starting fc process: %w", err), fcStopErr) + } + + // IPv4 configuration - format: [local_ip]::[gateway_ip]:[netmask]:hostname:iface:dhcp_option:[dns] + ipv4 := fmt.Sprintf("%s::%s:%s:instance:%s:off:%s", p.slot.NamespaceIP(), p.slot.TapIPString(), p.slot.TapMaskString(), p.slot.VpeerName(), p.slot.TapName()) + args := KernelArgs{ + // Disable kernel logs for production to speed the FC operations + // https://github.com/firecracker-microvm/firecracker/blob/main/docs/prod-host-setup.md#logging-and-performance + "quiet": "", + "loglevel": "1", + + // Define kernel init path + "init": 
options.InitScriptPath, + + // Networking IPv4 and IPv6 + "ip": ipv4, + "ipv6.disable": "0", + "ipv6.autoconf": "1", + + // Wait 1 second before exiting FC after panic or reboot + "panic": "1", + + "reboot": "k", + "pci": "off", + "i8042.nokbd": "", + "i8042.noaux": "", + "random.trust_cpu": "on", + } + if options.SystemdToKernelLogs { + args["systemd.journald.forward_to_console"] = "" + } + if options.KernelLogs || options.SystemdToKernelLogs { + // Forward kernel logs to the ttyS0, which will be picked up by the stdout of FC process + delete(args, "quiet") + args["console"] = "ttyS0" + args["loglevel"] = "5" // KERN_NOTICE + } + + kernelArgs := args.String() + err = p.client.setBootSource(childCtx, kernelArgs, p.files.BuildKernelPath()) if err != nil { - return fmt.Errorf("error getting rootfs path: %w", err) + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error setting fc boot source config: %w", err), fcStopErr) + } + telemetry.ReportEvent(childCtx, "set fc boot source config") + + // Rootfs + err = utils.SymlinkForce(p.rootfsPath, p.files.SandboxCacheRootfsLinkPath()) + if err != nil { + return fmt.Errorf("error symlinking rootfs: %w", err) } - err = os.Remove(p.files.SandboxCacheRootfsLinkPath()) + err = p.client.setRootfsDrive(childCtx, p.buildRootfsPath) if err != nil { - return fmt.Errorf("error removing rootfs symlink: %w", err) + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error setting fc drivers config: %w", err), fcStopErr) + } + telemetry.ReportEvent(childCtx, "set fc drivers config") + + // Network + err = p.client.setNetworkInterface(childCtx, p.slot.VpeerName(), p.slot.TapName(), p.slot.TapMAC()) + if err != nil { + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error setting fc network config: %w", err), fcStopErr) + } + telemetry.ReportEvent(childCtx, "set fc network config") + + err = p.client.setMachineConfig(childCtx, vCPUCount, memoryMB, hugePages) + if err != nil { + fcStopErr := p.Stop() + + return 
errors.Join(fmt.Errorf("error setting fc machine config: %w", err), fcStopErr) } + telemetry.ReportEvent(childCtx, "set fc machine config") - err = os.Symlink(device, p.files.SandboxCacheRootfsLinkPath()) + err = p.client.startVM(childCtx) + if err != nil { + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error starting fc: %w", err), fcStopErr) + } + + telemetry.ReportEvent(childCtx, "started fc") + return nil +} + +func (p *Process) Resume( + ctx context.Context, + tracer trace.Tracer, + mmdsMetadata *MmdsMetadata, + uffdSocketPath string, + snapfile template.File, + uffdReady chan struct{}, +) error { + childCtx, childSpan := tracer.Start(ctx, "resume-fc") + defer childSpan.End() + + err := p.configure( + childCtx, + tracer, + mmdsMetadata.SandboxId, + mmdsMetadata.TemplateId, + mmdsMetadata.TeamId, + nil, + nil, + ) + if err != nil { + fcStopErr := p.Stop() + + return errors.Join(fmt.Errorf("error starting fc process: %w", err), fcStopErr) + } + + err = utils.SymlinkForce(p.rootfsPath, p.files.SandboxCacheRootfsLinkPath()) if err != nil { return fmt.Errorf("error symlinking rootfs: %w", err) } err = p.client.loadSnapshot( - startCtx, - p.uffdSocketPath, - p.uffdReady, - p.snapfile, + childCtx, + uffdSocketPath, + uffdReady, + snapfile, ) if err != nil { fcStopErr := p.Stop() @@ -251,14 +391,14 @@ func (p *Process) Start( return errors.Join(fmt.Errorf("error loading snapshot: %w", err), fcStopErr) } - err = p.client.resumeVM(startCtx) + err = p.client.resumeVM(childCtx) if err != nil { fcStopErr := p.Stop() return errors.Join(fmt.Errorf("error resuming vm: %w", err), fcStopErr) } - err = p.client.setMmds(startCtx, p.metadata) + err = p.client.setMmds(childCtx, mmdsMetadata) if err != nil { fcStopErr := p.Stop() @@ -289,7 +429,7 @@ func (p *Process) Stop() error { err := p.cmd.Process.Kill() if err != nil { - return fmt.Errorf("failed to send KILL to FC process: %w", err) + zap.L().Warn("failed to send KILL to FC process", zap.Error(err)) } return nil 
@@ -302,7 +442,7 @@ func (p *Process) Pause(ctx context.Context, tracer trace.Tracer) error { return p.client.pauseVM(ctx) } -// VM needs to be paused before creating a snapshot. +// CreateSnapshot VM needs to be paused before creating a snapshot. func (p *Process) CreateSnapshot(ctx context.Context, tracer trace.Tracer, snapfilePath string, memfilePath string) error { ctx, childSpan := tracer.Start(ctx, "create-snapshot-fc") defer childSpan.End() diff --git a/packages/orchestrator/internal/sandbox/health.go b/packages/orchestrator/internal/sandbox/health.go new file mode 100644 index 0000000..573c200 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/health.go @@ -0,0 +1,42 @@ +package sandbox + +import ( + "context" + "fmt" + "io" + "net/http" + "time" + + "github.com/e2b-dev/infra/packages/shared/pkg/consts" +) + +func (c *Checks) GetHealth(timeout time.Duration) (bool, error) { + ctx, cancel := context.WithTimeout(c.ctx, timeout) + defer cancel() + + address := fmt.Sprintf("http://%s:%d/health", c.sandbox.Slot.HostIPString(), consts.DefaultEnvdServerPort) + + request, err := http.NewRequestWithContext(ctx, "GET", address, nil) + if err != nil { + return false, err + } + + response, err := httpClient.Do(request) + if err != nil { + return false, err + } + defer func() { + // Drain the response body to reuse the connection + // From response.Body docstring: + // // The default HTTP client's Transport may not reuse HTTP/1.x "keep-alive" TCP connections + // if the Body is not read to completion and closed. 
+ io.Copy(io.Discard, response.Body) + response.Body.Close() + }() + + if response.StatusCode != http.StatusNoContent { + return false, fmt.Errorf("unexpected status code: %d", response.StatusCode) + } + + return true, nil +} diff --git a/packages/orchestrator/internal/sandbox/metrics.go b/packages/orchestrator/internal/sandbox/metrics.go index 03f92c5..390bbcf 100644 --- a/packages/orchestrator/internal/sandbox/metrics.go +++ b/packages/orchestrator/internal/sandbox/metrics.go @@ -5,54 +5,50 @@ import ( "encoding/json" "fmt" "net/http" + "time" "github.com/e2b-dev/infra/packages/shared/pkg/consts" ) -type SandboxMetrics struct { +type Metrics struct { Timestamp int64 `json:"ts"` // Unix Timestamp in UTC - CPUCount uint32 `json:"cpu_count"` // Total CPU cores - CPUUsedPercent float32 `json:"cpu_used_pct"` // Percent rounded to 2 decimal places - MemTotalMiB uint64 `json:"mem_total_mib"` // Total virtual memory in MiB - MemUsedMiB uint64 `json:"mem_used_mib"` // Used virtual memory in MiB + CPUCount int64 `json:"cpu_count"` // Total CPU cores + CPUUsedPercent float64 `json:"cpu_used_pct"` // Percent rounded to 2 decimal places + MemTotalMiB int64 `json:"mem_total_mib"` // Total virtual memory in MiB + MemUsedMiB int64 `json:"mem_used_mib"` // Used virtual memory in MiB } -func (s *Sandbox) GetMetrics(ctx context.Context) (SandboxMetrics, error) { - address := fmt.Sprintf("http://%s:%d/metrics", s.Slot.HostIP(), consts.DefaultEnvdServerPort) +func (c *Checks) GetMetrics(timeout time.Duration) (*Metrics, error) { + ctx, cancel := context.WithTimeout(c.ctx, timeout) + defer cancel() + + address := fmt.Sprintf("http://%s:%d/metrics", c.sandbox.Slot.HostIPString(), consts.DefaultEnvdServerPort) request, err := http.NewRequestWithContext(ctx, "GET", address, nil) if err != nil { - return SandboxMetrics{}, err + return nil, err + } + + if c.sandbox.Metadata.Config.EnvdAccessToken != nil { + request.Header.Set("X-Access-Token", *c.sandbox.Metadata.Config.EnvdAccessToken) } 
response, err := httpClient.Do(request) if err != nil { - return SandboxMetrics{}, err + return nil, err } defer response.Body.Close() if response.StatusCode != http.StatusOK { err = fmt.Errorf("unexpected status code: %d", response.StatusCode) - return SandboxMetrics{}, err + return nil, err } - var metrics SandboxMetrics - err = json.NewDecoder(response.Body).Decode(&metrics) + var m Metrics + err = json.NewDecoder(response.Body).Decode(&m) if err != nil { - return SandboxMetrics{}, err + return nil, err } - return metrics, nil -} - -func (s *Sandbox) LogMetrics(ctx context.Context) { - if isGTEVersion(s.Config.EnvdVersion, minEnvdVersionForMetrcis) { - metrics, err := s.GetMetrics(ctx) - if err != nil { - s.Logger.Warnf("failed to get metrics: %s", err) - } else { - s.Logger.Metrics( - metrics.MemTotalMiB, metrics.MemUsedMiB, metrics.CPUCount, metrics.CPUUsedPercent) - } - } + return &m, nil } diff --git a/packages/orchestrator/internal/sandbox/nbd/dispatch.go b/packages/orchestrator/internal/sandbox/nbd/dispatch.go index 3d40d43..cb71398 100644 --- a/packages/orchestrator/internal/sandbox/nbd/dispatch.go +++ b/packages/orchestrator/internal/sandbox/nbd/dispatch.go @@ -3,6 +3,7 @@ package nbd import ( "context" "encoding/binary" + "errors" "fmt" "io" "sync" @@ -10,6 +11,8 @@ import ( "go.uber.org/zap" ) +var ErrShuttingDown = errors.New("shutting down. 
Cannot serve any new requests") + type Provider interface { io.ReaderAt io.WriterAt @@ -55,7 +58,9 @@ type Dispatch struct { writeLock sync.Mutex prov Provider pendingResponses sync.WaitGroup - pendingMu sync.Mutex + shuttingDown bool + shuttingDownLock sync.Mutex + fatal chan error } func NewDispatch(ctx context.Context, fp io.ReadWriteCloser, prov Provider) *Dispatch { @@ -64,15 +69,17 @@ func NewDispatch(ctx context.Context, fp io.ReadWriteCloser, prov Provider) *Dis fp: fp, prov: prov, ctx: ctx, + fatal: make(chan error, 1), } binary.BigEndian.PutUint32(d.responseHeader, NBDResponseMagic) return d } -func (d *Dispatch) Wait() { - d.pendingMu.Lock() - defer d.pendingMu.Unlock() +func (d *Dispatch) Drain() { + d.shuttingDownLock.Lock() + d.shuttingDown = true + defer d.shuttingDownLock.Unlock() // Wait for any pending responses d.pendingResponses.Wait() @@ -86,8 +93,6 @@ func (d *Dispatch) writeResponse(respError uint32, respHandle uint64, chunk []by d.writeLock.Lock() defer d.writeLock.Unlock() - // fmt.Printf("WriteResponse %v %x -> %d\n", d.fp, respHandle, len(chunk)) - binary.BigEndian.PutUint32(d.responseHeader[4:], respError) binary.BigEndian.PutUint64(d.responseHeader[8:], respHandle) @@ -126,9 +131,10 @@ func (d *Dispatch) Handle() error { rp := 0 process: for { - - // If the context has been cancelled, quit + // Check if there is a fatal error from an async read/write to return select { + case err := <-d.fatal: + return err case <-d.ctx.Done(): return d.ctx.Err() default: @@ -175,14 +181,13 @@ func (d *Dispatch) Handle() error { } case NBDCmdTrim: rp += 28 - err = d.cmdTrim(request.Handle, request.From, request.Length) + err := d.cmdTrim(request.Handle, request.From, request.Length) if err != nil { return err } default: return fmt.Errorf("nbd not implemented %d", request.Type) } - } else { break // Try again when we have more data... 
} @@ -196,41 +201,48 @@ func (d *Dispatch) Handle() error { } func (d *Dispatch) cmdRead(cmdHandle uint64, cmdFrom uint64, cmdLength uint32) error { - d.pendingMu.Lock() - d.pendingResponses.Add(1) - d.pendingMu.Unlock() + d.shuttingDownLock.Lock() + if !d.shuttingDown { + d.pendingResponses.Add(1) + } else { + d.shuttingDownLock.Unlock() + return ErrShuttingDown + } + d.shuttingDownLock.Unlock() performRead := func(handle uint64, from uint64, length uint32) error { - errchan := make(chan error) + // buffered to avoid goroutine leak + errchan := make(chan error, 1) data := make([]byte, length) go func() { - _, e := d.prov.ReadAt(data, int64(from)) - errchan <- e + _, err := d.prov.ReadAt(data, int64(from)) + errchan <- err }() // Wait until either the ReadAt completed, or our context is cancelled... - var e error select { case <-d.ctx.Done(): - e = d.ctx.Err() - case e = <-errchan: + return d.writeResponse(1, handle, []byte{}) + case err := <-errchan: + if err != nil { + return d.writeResponse(1, handle, []byte{}) + } } - errorValue := uint32(0) - if e != nil { - errorValue = 1 - data = make([]byte, 0) // If there was an error, don't send data - } - return d.writeResponse(errorValue, handle, data) + // read was successful + return d.writeResponse(0, handle, data) } go func() { err := performRead(cmdHandle, cmdFrom, cmdLength) if err != nil { - zap.L().Error("nbd error cmd read", zap.Error(err)) + select { + case d.fatal <- err: + default: + zap.L().Error("nbd error cmd read", zap.Error(err)) + } } - d.pendingResponses.Done() }() @@ -238,37 +250,48 @@ func (d *Dispatch) cmdRead(cmdHandle uint64, cmdFrom uint64, cmdLength uint32) e } func (d *Dispatch) cmdWrite(cmdHandle uint64, cmdFrom uint64, cmdData []byte) error { - d.pendingMu.Lock() - d.pendingResponses.Add(1) - d.pendingMu.Unlock() + d.shuttingDownLock.Lock() + if !d.shuttingDown { + d.pendingResponses.Add(1) + } else { + d.shuttingDownLock.Unlock() + return ErrShuttingDown + } + d.shuttingDownLock.Unlock() - 
go func() { - errchan := make(chan error) + performWrite := func(handle uint64, from uint64, data []byte) error { + // buffered to avoid goroutine leak + errchan := make(chan error, 1) go func() { - _, e := d.prov.WriteAt(cmdData, int64(cmdFrom)) - errchan <- e + _, err := d.prov.WriteAt(data, int64(from)) + errchan <- err }() // Wait until either the WriteAt completed, or our context is cancelled... - var e error select { case <-d.ctx.Done(): - e = d.ctx.Err() - case e = <-errchan: + return d.writeResponse(1, handle, []byte{}) + case err := <-errchan: + if err != nil { + return d.writeResponse(1, handle, []byte{}) + } } - errorValue := uint32(0) - if e != nil { - errorValue = 1 - } - err := d.writeResponse(errorValue, cmdHandle, []byte{}) + // write was successful + return d.writeResponse(0, handle, []byte{}) + } + + go func() { + err := performWrite(cmdHandle, cmdFrom, cmdData) if err != nil { - zap.L().Error("nbd error cmd write", zap.Error(err)) + select { + case d.fatal <- err: + default: + zap.L().Error("nbd error cmd write", zap.Error(err)) + } } - d.pendingResponses.Done() }() - return nil } diff --git a/packages/orchestrator/internal/sandbox/nbd/path_direct.go b/packages/orchestrator/internal/sandbox/nbd/path_direct.go deleted file mode 100644 index e45ca82..0000000 --- a/packages/orchestrator/internal/sandbox/nbd/path_direct.go +++ /dev/null @@ -1,170 +0,0 @@ -package nbd - -import ( - "context" - "fmt" - "log" - "net" - "os" - "strings" - "syscall" - "time" - - "github.com/Merovius/nbd/nbdnl" - - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" -) - -type DirectPathMount struct { - Backend block.Device - ctx context.Context - dispatcher *Dispatch - conn net.Conn - deviceIndex uint32 - blockSize uint64 - cancelfn context.CancelFunc -} - -func NewDirectPathMount(b block.Device) *DirectPathMount { - ctx, cancelfn := context.WithCancel(context.Background()) - - return &DirectPathMount{ - Backend: b, - ctx: ctx, - cancelfn: cancelfn, - 
blockSize: 4096, - } -} - -func (d *DirectPathMount) Open(ctx context.Context) (uint32, error) { - size, err := d.Backend.Size() - if err != nil { - return 0, err - } - - for { - d.deviceIndex, err = Pool.GetDevice(ctx) - if err != nil { - return 0, err - } - - // Create the socket pairs - sockPair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) - if err != nil { - return 0, err - } - - client := os.NewFile(uintptr(sockPair[0]), "client") - server := os.NewFile(uintptr(sockPair[1]), "server") - d.conn, err = net.FileConn(server) - - if err != nil { - return 0, err - } - server.Close() - - d.dispatcher = NewDispatch(d.ctx, d.conn, d.Backend) - // Start reading commands on the socket and dispatching them to our provider - go func() { - handleErr := d.dispatcher.Handle() - if handleErr != nil { - log.Printf("Error handling NBD commands: %v", handleErr) - } - }() - - var opts []nbdnl.ConnectOption - opts = append(opts, nbdnl.WithBlockSize(d.blockSize)) - opts = append(opts, nbdnl.WithTimeout(5*time.Second)) - opts = append(opts, nbdnl.WithDeadconnTimeout(5*time.Second)) - - serverFlags := nbdnl.FlagHasFlags | nbdnl.FlagCanMulticonn - - idx, err := nbdnl.Connect(d.deviceIndex, []*os.File{client}, uint64(size), 0, serverFlags, opts...) - if err == nil { - d.deviceIndex = idx - break - } - - // Sometimes (rare), there seems to be a BADF error here. Lets just retry for now... - // Close things down and try again... - _ = client.Close() - - connErr := d.conn.Close() - if connErr != nil { - fmt.Printf("Error closing conn: %v\n", connErr) - } - - releaseErr := Pool.ReleaseDevice(d.deviceIndex) - if releaseErr != nil { - fmt.Printf("Error releasing device: %v\n", releaseErr) - } - - d.deviceIndex = 0 - - if strings.Contains(err.Error(), "invalid argument") { - return 0, err - } - - time.Sleep(25 * time.Millisecond) - } - - // Wait until it's connected... 
- for { - select { - case <-d.ctx.Done(): - return 0, d.ctx.Err() - default: - } - - s, err := nbdnl.Status(d.deviceIndex) - if err == nil && s.Connected { - break - } - - time.Sleep(100 * time.Nanosecond) - } - - return d.deviceIndex, nil -} - -func (d *DirectPathMount) Close() error { - // First cancel the context, which will stop waiting on pending readAt/writeAt... - d.ctx.Done() - - // Now wait for any pending responses to be sent - if d.dispatcher != nil { - d.dispatcher.Wait() - } - - // Now ask to disconnect - err := nbdnl.Disconnect(d.deviceIndex) - if err != nil { - return err - } - - // Close all the socket pairs... - err = d.conn.Close() - if err != nil { - return err - } - - ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second) - defer cancel() - // Wait until it's completely disconnected... - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - s, err := nbdnl.Status(d.deviceIndex) - if err == nil && !s.Connected { - break - } - - time.Sleep(100 * time.Nanosecond) - } - - return nil -} diff --git a/packages/orchestrator/internal/sandbox/nbd/path_direct_linux.go b/packages/orchestrator/internal/sandbox/nbd/path_direct_linux.go new file mode 100644 index 0000000..7c30223 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/nbd/path_direct_linux.go @@ -0,0 +1,277 @@ +//go:build linux +// +build linux + +package nbd + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/Merovius/nbd/nbdnl" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +const ( + connections = 4 + connectTimeout = 30 * time.Second + + // disconnectTimeout should not be necessary if the disconnect is reliable + disconnectTimeout = 30 * time.Second +) + +type DirectPathMount struct { + tracer trace.Tracer + ctx 
context.Context + cancelfn context.CancelFunc + + devicePool *DevicePool + + Backend block.Device + deviceIndex uint32 + blockSize uint64 + + dispatchers []*Dispatch + socksClient []*os.File + socksServer []io.Closer + + handlersWg sync.WaitGroup +} + +func NewDirectPathMount(tracer trace.Tracer, b block.Device, devicePool *DevicePool) *DirectPathMount { + ctx, cancelfn := context.WithCancel(context.Background()) + + return &DirectPathMount{ + tracer: tracer, + Backend: b, + ctx: ctx, + cancelfn: cancelfn, + blockSize: 4096, + devicePool: devicePool, + socksClient: make([]*os.File, 0), + socksServer: make([]io.Closer, 0), + deviceIndex: math.MaxUint32, + } +} + +func (d *DirectPathMount) Open(ctx context.Context) (retDeviceIndex uint32, err error) { + defer func() { + // Set the device index to the one returned, correctly capture error values + d.deviceIndex = retDeviceIndex + zap.L().Debug("opening direct path mount", zap.Uint32("device_index", d.deviceIndex), zap.Error(err)) + }() + + size, err := d.Backend.Size() + if err != nil { + return math.MaxUint32, err + } + + deviceIndex := uint32(math.MaxUint32) + + for { + deviceIndex, err = d.devicePool.GetDevice(ctx) + if err != nil { + return math.MaxUint32, err + } + + d.socksClient = make([]*os.File, 0) + d.socksServer = make([]io.Closer, 0) + d.dispatchers = make([]*Dispatch, 0) + + for i := 0; i < connections; i++ { + // Create the socket pairs + sockPair, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + if err != nil { + return math.MaxUint32, err + } + + client := os.NewFile(uintptr(sockPair[0]), "client") + server := os.NewFile(uintptr(sockPair[1]), "server") + serverc, err := net.FileConn(server) + if err != nil { + return math.MaxUint32, err + } + server.Close() + + dispatch := NewDispatch(d.ctx, serverc, d.Backend) + // Start reading commands on the socket and dispatching them to our provider + d.handlersWg.Add(1) + go func() { + defer d.handlersWg.Done() + + handleErr := 
dispatch.Handle() + // The error is expected to happen if the nbd (socket connection) is closed + zap.L().Info("closing handler for NBD commands", + zap.Error(handleErr), + zap.Uint32("device_index", deviceIndex), + zap.Int("socket_index", i), + ) + }() + + d.socksServer = append(d.socksServer, serverc) + d.socksClient = append(d.socksClient, client) + d.dispatchers = append(d.dispatchers, dispatch) + } + + var opts []nbdnl.ConnectOption + opts = append(opts, nbdnl.WithBlockSize(d.blockSize)) + opts = append(opts, nbdnl.WithTimeout(connectTimeout)) + opts = append(opts, nbdnl.WithDeadconnTimeout(connectTimeout)) + + serverFlags := nbdnl.FlagHasFlags | nbdnl.FlagCanMulticonn + + idx, err := nbdnl.Connect(deviceIndex, d.socksClient, uint64(size), 0, serverFlags, opts...) + if err == nil { + // The idx should be the same as deviceIndex, because we are connecting to it, + // but we will use the one returned by nbdnl + deviceIndex = idx + + break + } + + zap.L().Error("error opening NBD, retrying", zap.Error(err), zap.Uint32("device_index", deviceIndex)) + + // Sometimes (rare), there seems to be a BADF error here. Lets just retry for now... + // Close things down and try again... + for _, sock := range d.socksClient { + sock.Close() + } + for _, sock := range d.socksServer { + sock.Close() + } + // Release the device back to the pool + releaseErr := d.devicePool.ReleaseDevice(deviceIndex) + if releaseErr != nil { + zap.L().Error("error opening NBD, error releasing device", zap.Error(releaseErr), zap.Uint32("device_index", deviceIndex)) + } + + if strings.Contains(err.Error(), "invalid argument") { + return math.MaxUint32, err + } + + select { + case <-d.ctx.Done(): + return math.MaxUint32, errors.Join(err, d.ctx.Err()) + case <-time.After(25 * time.Millisecond): + } + } + + // Wait until it's connected... 
+ for { + select { + case <-d.ctx.Done(): + return math.MaxUint32, d.ctx.Err() + default: + } + + s, err := nbdnl.Status(deviceIndex) + if err == nil && s.Connected { + break + } + + time.Sleep(100 * time.Nanosecond) + } + + return deviceIndex, nil +} + +func (d *DirectPathMount) Close(ctx context.Context) error { + childCtx, childSpan := d.tracer.Start(ctx, "direct-path-mount-close") + defer childSpan.End() + + var errs []error + + idx := d.deviceIndex + + // First cancel the context, which will stop waiting on pending readAt/writeAt... + telemetry.ReportEvent(childCtx, "canceling context") + d.cancelfn() + + // Close all server socket pairs... + telemetry.ReportEvent(childCtx, "closing socket pairs server") + for _, v := range d.socksServer { + err := v.Close() + if err != nil { + errs = append(errs, fmt.Errorf("error closing server pair: %w", err)) + } + } + + // Now wait until the handlers return + telemetry.ReportEvent(childCtx, "await handlers return") + d.handlersWg.Wait() + + // Now wait for any pending responses to be sent + telemetry.ReportEvent(childCtx, "waiting for pending responses") + for _, d := range d.dispatchers { + d.Drain() + } + + // Disconnect NBD + if idx != math.MaxUint32 { + err := disconnectNBDWithTimeout(childCtx, idx, disconnectTimeout) + if err != nil { + errs = append(errs, fmt.Errorf("error disconnecting NBD: %w", err)) + } + } + + // Close all client socket pairs... + telemetry.ReportEvent(childCtx, "closing socket pairs client") + for _, v := range d.socksClient { + err := v.Close() + if err != nil { + errs = append(errs, fmt.Errorf("error closing socket pair client: %w", err)) + } + } + + // Release the device back to the pool, retry if it is in use + if idx != math.MaxUint32 { + telemetry.ReportEvent(childCtx, "releasing device to the pool") + err := d.devicePool.ReleaseDeviceWithRetry(idx) + if err != nil { + errs = append(errs, fmt.Errorf("error releasing overlay device: %w", err)) + } + } + + return errors.Join(errs...) 
+} + +func disconnectNBDWithTimeout(ctx context.Context, deviceIndex uint32, timeout time.Duration) error { + // Now ask to disconnect + telemetry.ReportEvent(ctx, "disconnecting NBD") + err := nbdnl.Disconnect(deviceIndex) + if err != nil { + return err + } + + // Wait until it's completely disconnected... + telemetry.ReportEvent(ctx, "waiting for complete disconnection") + ctxTimeout, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + for { + select { + case <-ctxTimeout.Done(): + return ctxTimeout.Err() + default: + } + + s, err := nbdnl.Status(deviceIndex) + if err == nil && !s.Connected { + break + } + time.Sleep(100 * time.Nanosecond) + } + + return nil +} diff --git a/packages/orchestrator/internal/sandbox/nbd/path_direct_other.go b/packages/orchestrator/internal/sandbox/nbd/path_direct_other.go new file mode 100644 index 0000000..d6ed68e --- /dev/null +++ b/packages/orchestrator/internal/sandbox/nbd/path_direct_other.go @@ -0,0 +1,29 @@ +//go:build !linux +// +build !linux + +package nbd + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" +) + +type DirectPathMount struct { + Backend block.Device +} + +func NewDirectPathMount(tracer trace.Tracer, b block.Device, devicePool *DevicePool) *DirectPathMount { + return nil +} + +func (d *DirectPathMount) Open(ctx context.Context) (uint32, error) { + return 0, errors.New("platform does not support direct path mount") +} + +func (d *DirectPathMount) Close(ctx context.Context) error { + return errors.New("platform does not support direct path mount") +} diff --git a/packages/orchestrator/internal/sandbox/nbd/pool.go b/packages/orchestrator/internal/sandbox/nbd/pool.go index 27bdaca..432e479 100644 --- a/packages/orchestrator/internal/sandbox/nbd/pool.go +++ b/packages/orchestrator/internal/sandbox/nbd/pool.go @@ -4,17 +4,17 @@ import ( "context" "errors" "fmt" - "log" "os" - "regexp" "strconv" "strings" "sync" 
+ "time" "github.com/bits-and-blooms/bitset" "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" - "github.com/e2b-dev/infra/packages/shared/pkg/meters" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" ) // maxSlotsReady is the number of slots that are ready to be used. @@ -47,7 +47,9 @@ type ( // // Use `sudo modprobe nbd nbds_max=4096` to set the max number of devices to 4096, which is a good default for now. type DevicePool struct { - ctx context.Context + ctx context.Context + exit chan error + // We use the bitset to speedup the free device lookup. usedSlots *bitset.BitSet mu sync.Mutex @@ -57,25 +59,25 @@ type DevicePool struct { slotCounter metric.Int64UpDownCounter } -var Pool *DevicePool - -func init() { +func NewDevicePool(ctx context.Context, meterProvider metric.MeterProvider) (*DevicePool, error) { maxDevices, err := getMaxDevices() if err != nil { - panic(fmt.Errorf("failed to get current max devices: %w", err)) + return nil, fmt.Errorf("failed to get max devices: %w", err) } if maxDevices == 0 { - panic(fmt.Errorf("nbd module is not loaded or max devices is set to 0")) + return nil, errors.New("max devices is 0") } - counter, err := meters.GetUpDownCounter(meters.NBDkSlotSReadyPoolCounterMeterName) + meter := meterProvider.Meter("orchestrator.device.pool") + counter, err := telemetry.GetUpDownCounter(meter, telemetry.NBDkSlotSReadyPoolCounterMeterName) if err != nil { - panic(fmt.Errorf("failed to get nbd slot pool counter: %w", err)) + return nil, fmt.Errorf("failed to get slot pool counter: %w", err) } pool := &DevicePool{ - ctx: context.Background(), + ctx: ctx, + exit: make(chan error, 1), usedSlots: bitset.New(maxDevices), slots: make(chan DeviceSlot, maxSlotsReady), slotCounter: counter, @@ -84,11 +86,13 @@ func init() { go func() { err = pool.Populate() if err != nil { - log.Fatalf("failed during populating device pool: %v\n", err) + zap.L().Fatal("failed during populating device pool", zap.Error(err)) } + + zap.L().Info("device 
pool populate closed") }() - Pool = pool + return pool, nil } func getMaxDevices() (uint, error) { @@ -117,16 +121,25 @@ func (d *DevicePool) Populate() error { select { case <-d.ctx.Done(): return d.ctx.Err() + case err := <-d.exit: + return err default: device, err := d.getFreeDeviceSlot() if err != nil { - fmt.Fprintf(os.Stderr, "[nbd pool]: failed to create network: %v\n", err) + zap.L().Error("[nbd pool]: failed to create network", zap.Error(err)) continue } d.slotCounter.Add(d.ctx, 1) - d.slots <- *device + + // Use select to avoid panic if context is canceled before writing + select { + case err := <-d.exit: + return err + case d.slots <- *device: + // sent successfully + } } } } @@ -227,18 +240,16 @@ func (d *DevicePool) getFreeDeviceSlot() (*DeviceSlot, error) { // Get device slot if there is one available. func (d *DevicePool) GetDevice(ctx context.Context) (DeviceSlot, error) { - for { - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + } - slot := <-d.slots - d.slotCounter.Add(d.ctx, -1) + slot := <-d.slots + d.slotCounter.Add(d.ctx, -1) - return slot, nil - } + return slot, nil } // ReleaseDevice will return an error if the device is not free and not release the slot — you can retry. @@ -259,22 +270,49 @@ func (d *DevicePool) ReleaseDevice(idx DeviceSlot) error { return nil } +// ReleaseDeviceWithRetry calls ReleaseDevice and retries if the device is in use. 
+func (d *DevicePool) ReleaseDeviceWithRetry(idx DeviceSlot) error { + attempt := 0 + for { + attempt++ + err := d.ReleaseDevice(idx) + if errors.Is(err, ErrDeviceInUse{}) { + if attempt%100 == 0 { + zap.L().Error("error releasing device", zap.Int("attempt", attempt), zap.Error(err)) + } + + time.Sleep(500 * time.Millisecond) + + continue + } + + if err != nil { + return fmt.Errorf("error releasing device: %w", err) + } + + break + } + + return nil +} + func GetDevicePath(slot DeviceSlot) DevicePath { return fmt.Sprintf("/dev/nbd%d", slot) } -var reSlot = regexp.MustCompile(`^/dev/nbd(\d+)$`) +func (d *DevicePool) Close(_ context.Context) error { + zap.L().Info("Closing device pool", zap.Uint("used_slots", d.usedSlots.Count())) -func GetDeviceSlot(path DevicePath) (DeviceSlot, error) { - matches := reSlot.FindStringSubmatch(path) - if len(matches) != 2 { - return 0, fmt.Errorf("invalid nbd path: %s", path) - } + close(d.exit) - slot, err := strconv.ParseUint(matches[1], 10, 0) - if err != nil { - return 0, fmt.Errorf("failed to parse slot from path: %w", err) + var errs error + for slotIdx, e := d.usedSlots.NextSet(0); e; slotIdx, e = d.usedSlots.NextSet(slotIdx + 1) { + slot := DeviceSlot(slotIdx) + err := d.ReleaseDeviceWithRetry(slot) + if err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to release device %d: %w", slot, err)) + } } - return DeviceSlot(slot), nil + return errs } diff --git a/packages/orchestrator/internal/sandbox/network/blocking_rules.go b/packages/orchestrator/internal/sandbox/network/blocking_rules.go deleted file mode 100644 index 91d4630..0000000 --- a/packages/orchestrator/internal/sandbox/network/blocking_rules.go +++ /dev/null @@ -1,42 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/coreos/go-iptables/iptables" -) - -var blockedRanges = []string{ - "10.0.0.0/8", - "169.254.0.0/16", - "192.168.0.0/16", - "172.16.0.0/12", -} - -func getBlockingRule(slot *Slot, ipRange string) []string { - return []string{"-p", "all", 
"-i", slot.TapName(), "-d", ipRange, "-j", "DROP"} -} - -func getAllowRule(slot *Slot) []string { - return []string{"-p", "tcp", "-i", slot.TapName(), "-m", "conntrack", "--ctstate", "ESTABLISHED,RELATED", "-j", "ACCEPT"} -} - -func (s *Slot) addBlockingRules(tables *iptables.IPTables) error { - for _, ipRange := range blockedRanges { - rule := getBlockingRule(s, ipRange) - - err := tables.Append("filter", "FORWARD", rule...) - if err != nil { - return fmt.Errorf("error adding blocking rule: %w", err) - } - } - - allowRule := getAllowRule(s) - - err := tables.Insert("filter", "FORWARD", 1, allowRule...) - if err != nil { - return fmt.Errorf("error adding response rule: %w", err) - } - - return nil -} diff --git a/packages/orchestrator/internal/sandbox/network/firewall.go b/packages/orchestrator/internal/sandbox/network/firewall.go new file mode 100644 index 0000000..8563fec --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/firewall.go @@ -0,0 +1,253 @@ +package network + +import ( + "fmt" + "net/netip" + "os" + "strings" + + "github.com/google/nftables" + "github.com/google/nftables/expr" + "github.com/ngrok/firewall_toolkit/pkg/expressions" + "github.com/ngrok/firewall_toolkit/pkg/rule" + "github.com/ngrok/firewall_toolkit/pkg/set" +) + +const ( + tableName = "slot-firewall" +) + +var blockedRanges = []string{ + "10.0.0.0/8", + "169.254.0.0/16", + "192.168.0.0/16", + "172.16.0.0/12", +} + +type Firewall struct { + conn *nftables.Conn + table *nftables.Table + chain *nftables.Chain + blockSet set.Set + allowSet set.Set + tapInterface string +} + +func NewFirewall(tapIf string) (*Firewall, error) { + conn, err := nftables.New(nftables.AsLasting()) + if err != nil { + return nil, fmt.Errorf("new nftables conn: %w", err) + } + + table := conn.AddTable(&nftables.Table{ + Name: tableName, + Family: nftables.TableFamilyINet, + }) + acceptPolicy := nftables.ChainPolicyAccept + chain := conn.AddChain(&nftables.Chain{ + Name: "FORWARD", + Table: table, + 
Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookForward, + Priority: nftables.ChainPriorityFilter, + Policy: &acceptPolicy, + }) + + // Create block-set and allow-set + blockSet, err := set.New(conn, table, "filtered_blocklist", nftables.TypeIPAddr) + if err != nil { + return nil, fmt.Errorf("new block set: %w", err) + } + allowSet, err := set.New(conn, table, "filtered_allowlist", nftables.TypeIPAddr) + if err != nil { + return nil, fmt.Errorf("new allow set: %w", err) + } + + fw := &Firewall{ + conn: conn, + table: table, + chain: chain, + blockSet: blockSet, + allowSet: allowSet, + tapInterface: tapIf, + } + + // Add firewall rules to the chain + if err := fw.installRules(); err != nil { + return nil, err + } + + // Populate the sets with initial data + err = fw.ResetAllCustom() + if err != nil { + return nil, fmt.Errorf("error while configuring initial block set: %w", err) + } + return fw, nil +} + +func (fw *Firewall) Close() error { + return fw.conn.CloseLasting() +} + +func (fw *Firewall) installRules() error { + m := fw.tapInterface + + // helper for the tap interface + ifaceMatch := []expr.Any{ + &expr.Meta{Key: expr.MetaKeyIIFNAME, Register: 1}, + &expr.Cmp{ + Register: 1, + Op: expr.CmpOpEq, + Data: append([]byte(m), 0), // null-terminated + }, + } + + // Allow ESTABLISHED,RELATED + exprs, err := rule.Build( + expr.VerdictAccept, + rule.TransportProtocol(expressions.TCP), + rule.LoadConnectionTrackingState(expr.CtKeySTATE), + rule.ConnectionTrackingState(expr.CtStateBitRELATED|expr.CtStateBitESTABLISHED), + ) + if err != nil { + return fmt.Errorf("build rule for established/related: %w", err) + } + fw.conn.InsertRule(&nftables.Rule{ + Table: fw.table, Chain: fw.chain, + Exprs: append(ifaceMatch, + exprs..., + ), + }) + + // Allow anything in allowSet + fw.conn.InsertRule(&nftables.Rule{ + Table: fw.table, Chain: fw.chain, + Exprs: append(ifaceMatch, + expressions.IPv4DestinationAddress(1), + expressions.IPSetLookUp(fw.allowSet.Set(), 1), + 
expressions.Accept(), + ), + }) + + // Drop anything in blockSet + fw.conn.AddRule(&nftables.Rule{ + Table: fw.table, Chain: fw.chain, + Exprs: append(ifaceMatch, + expressions.IPv4DestinationAddress(1), + expressions.IPSetLookUp(fw.blockSet.Set(), 1), + expressions.Drop(), + ), + }) + + if err := fw.conn.Flush(); err != nil { + return fmt.Errorf("flush nftables changes: %w", err) + } + + return nil +} + +// AddBlockedIP adds a single CIDR to the block set at runtime. +func (fw *Firewall) AddBlockedIP(cidr string) error { + // 0.0.0.0/0 is not valid IP per GoLang, so we handle it as a special case + if cidr == "0.0.0.0/0" { + fw.conn.FlushSet(fw.blockSet.Set()) + + toAppend := []nftables.SetElement{ + {Key: netip.MustParseAddr("0.0.0.0").AsSlice()}, + { + Key: netip.MustParseAddr("255.255.255.255").AsSlice(), + IntervalEnd: true, + }, + } + + if err := fw.conn.SetAddElements(fw.blockSet.Set(), toAppend); err != nil { + return fmt.Errorf("add elements to block set: %w", err) + } + } else { + current, err := fw.blockSet.Elements(fw.conn) + if err != nil { + return err + } + + data, err := set.AddressStringsToSetData([]string{cidr}) + if err != nil { + return err + } + merged := append(current, data...) + if err := fw.blockSet.ClearAndAddElements(fw.conn, merged); err != nil { + return err + } + } + + err := fw.conn.Flush() + if err != nil { + return fmt.Errorf("flush add blocked IP changes: %w", err) + } + return nil +} + +// AddAllowedIP adds a single CIDR to the allow set at runtime. +func (fw *Firewall) AddAllowedIP(cidr string) error { + data, err := set.AddressStringsToSetData([]string{cidr}) + if err != nil { + return err + } + current, err := fw.allowSet.Elements(fw.conn) + if err != nil { + return err + } + merged := append(current, data...) 
+ if err := fw.allowSet.ClearAndAddElements(fw.conn, merged); err != nil { + return err + } + + err = fw.conn.Flush() + if err != nil { + return fmt.Errorf("flush add allowed IP changes: %w", err) + } + return nil +} + +func (fw *Firewall) ResetAllCustom() error { + if err := fw.ResetBlockedCustom(); err != nil { + return fmt.Errorf("clear block set: %w", err) + } + if err := fw.ResetAllowedCustom(); err != nil { + return fmt.Errorf("clear allow set: %w", err) + } + + return nil +} + +// ResetBlockedCustom resets the block set back to original ranges. +func (fw *Firewall) ResetBlockedCustom() error { + initData, err := set.AddressStringsToSetData(blockedRanges) + if err != nil { + return fmt.Errorf("parse initial block CIDRs: %w", err) + } + + if err := fw.blockSet.ClearAndAddElements(fw.conn, initData); err != nil { + return err + } + return fw.conn.Flush() +} + +// ResetAllowedCustom resets allow set back to original ranges. +func (fw *Firewall) ResetAllowedCustom() error { + initIps := make([]string, 0) + + // Allow Logs Collector IP for logs + if ip := os.Getenv("LOGS_COLLECTOR_PUBLIC_IP"); ip != "" { + ip = strings.TrimPrefix(ip, "http://") + "/32" + initIps = append(initIps, ip) + } + + initData, err := set.AddressStringsToSetData(initIps) + if err != nil { + return fmt.Errorf("parse initial allow CIDRs: %w", err) + } + if err := fw.allowSet.ClearAndAddElements(fw.conn, initData); err != nil { + return err + } + return fw.conn.Flush() +} diff --git a/packages/orchestrator/internal/sandbox/network/host.go b/packages/orchestrator/internal/sandbox/network/host_linux.go similarity index 89% rename from packages/orchestrator/internal/sandbox/network/host.go rename to packages/orchestrator/internal/sandbox/network/host_linux.go index a0df09e..6814b32 100644 --- a/packages/orchestrator/internal/sandbox/network/host.go +++ b/packages/orchestrator/internal/sandbox/network/host_linux.go @@ -1,10 +1,13 @@ +//go:build linux +// +build linux + package network import ( 
"fmt" - "log" "github.com/vishvananda/netlink" + "go.uber.org/zap" "github.com/e2b-dev/infra/packages/shared/pkg/utils" ) @@ -36,7 +39,8 @@ func getDefaultGateway() (string, error) { for _, route := range routes { // 0.0.0.0/0 if route.Dst.String() == "0.0.0.0/0" && route.Gw != nil { - log.Printf("default gateway: %s", route.Gw.String()) + zap.L().Info("default gateway", zap.String("gateway", route.Gw.String())) + link, linkErr := netlink.LinkByIndex(route.LinkIndex) if linkErr != nil { diff --git a/packages/orchestrator/internal/sandbox/network/host_other.go b/packages/orchestrator/internal/sandbox/network/host_other.go new file mode 100644 index 0000000..9c8dada --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/host_other.go @@ -0,0 +1,7 @@ +//go:build !linux +// +build !linux + +package network + +// Host loopback interface name +const loopbackInterface = "lo" diff --git a/packages/orchestrator/internal/sandbox/network/network.go b/packages/orchestrator/internal/sandbox/network/network_linux.go similarity index 82% rename from packages/orchestrator/internal/sandbox/network/network.go rename to packages/orchestrator/internal/sandbox/network/network_linux.go index 2b49da4..4f3b2a3 100644 --- a/packages/orchestrator/internal/sandbox/network/network.go +++ b/packages/orchestrator/internal/sandbox/network/network_linux.go @@ -1,15 +1,18 @@ +//go:build linux +// +build linux + package network import ( "errors" "fmt" "net" - "os" "runtime" "github.com/coreos/go-iptables/iptables" "github.com/vishvananda/netlink" "github.com/vishvananda/netns" + "go.uber.org/zap" ) func (s *Slot) CreateNetwork() error { @@ -26,12 +29,12 @@ func (s *Slot) CreateNetwork() error { defer func() { err = netns.Set(hostNS) if err != nil { - fmt.Fprintf(os.Stderr, "error resetting network namespace back to the host namespace: %v", err) + zap.L().Error("error resetting network namespace back to the host namespace", zap.Error(err)) } err = hostNS.Close() if err != nil { - 
fmt.Fprintf(os.Stderr, "error closing host network namespace: %v", err) + zap.L().Error("error closing host network namespace", zap.Error(err)) } }() @@ -66,15 +69,10 @@ func (s *Slot) CreateNetwork() error { return fmt.Errorf("error setting vpeer device up: %w", err) } - ip, ipNet, err := net.ParseCIDR(s.VpeerCIDR()) - if err != nil { - return fmt.Errorf("error parsing vpeer CIDR: %w", err) - } - err = netlink.AddrAdd(vpeer, &netlink.Addr{ IPNet: &net.IPNet{ - IP: ip, - Mask: ipNet.Mask, + IP: s.VpeerIP(), + Mask: s.VrtMask(), }, }) if err != nil { @@ -102,15 +100,10 @@ func (s *Slot) CreateNetwork() error { return fmt.Errorf("error setting veth device up: %w", err) } - ip, ipNet, err = net.ParseCIDR(s.VethCIDR()) - if err != nil { - return fmt.Errorf("error parsing veth CIDR: %w", err) - } - err = netlink.AddrAdd(vethInHost, &netlink.Addr{ IPNet: &net.IPNet{ - IP: ip, - Mask: ipNet.Mask, + IP: s.VethIP(), + Mask: s.VrtMask(), }, }) if err != nil { @@ -141,15 +134,10 @@ func (s *Slot) CreateNetwork() error { return fmt.Errorf("error setting tap device up: %w", err) } - ip, ipNet, err = net.ParseCIDR(s.TapCIDR()) - if err != nil { - return fmt.Errorf("error parsing tap CIDR: %w", err) - } - err = netlink.AddrAdd(tap, &netlink.Addr{ IPNet: &net.IPNet{ - IP: ip, - Mask: ipNet.Mask, + IP: s.TapIP(), + Mask: s.TapCIDR(), }, }) if err != nil { @@ -170,7 +158,7 @@ func (s *Slot) CreateNetwork() error { // Add NS default route err = netlink.RouteAdd(&netlink.Route{ Scope: netlink.SCOPE_UNIVERSE, - Gw: net.ParseIP(s.VethIP()), + Gw: s.VethIP(), }) if err != nil { return fmt.Errorf("error adding default NS route: %w", err) @@ -182,19 +170,19 @@ func (s *Slot) CreateNetwork() error { } // Add NAT routing rules to NS - err = tables.Append("nat", "POSTROUTING", "-o", s.VpeerName(), "-s", s.NamespaceIP(), "-j", "SNAT", "--to", s.HostIP()) + err = tables.Append("nat", "POSTROUTING", "-o", s.VpeerName(), "-s", s.NamespaceIP(), "-j", "SNAT", "--to", s.HostIPString()) if err != nil 
{ return fmt.Errorf("error creating postrouting rule to vpeer: %w", err) } - err = tables.Append("nat", "PREROUTING", "-i", s.VpeerName(), "-d", s.HostIP(), "-j", "DNAT", "--to", s.NamespaceIP()) + err = tables.Append("nat", "PREROUTING", "-i", s.VpeerName(), "-d", s.HostIPString(), "-j", "DNAT", "--to", s.NamespaceIP()) if err != nil { return fmt.Errorf("error creating postrouting rule from vpeer: %w", err) } - err = s.addBlockingRules(tables) + err = s.InitializeFirewall() if err != nil { - return fmt.Errorf("error adding blocking rules: %w", err) + return fmt.Errorf("error initializing slot firewall: %w", err) } // Go back to original namespace @@ -204,14 +192,9 @@ func (s *Slot) CreateNetwork() error { } // Add routing from host to FC namespace - _, ipNet, err = net.ParseCIDR(s.HostCIDR()) - if err != nil { - return fmt.Errorf("error parsing host snapshot CIDR: %w", err) - } - err = netlink.RouteAdd(&netlink.Route{ - Gw: net.ParseIP(s.VpeerIP()), - Dst: ipNet, + Gw: s.VpeerIP(), + Dst: s.HostNet(), }) if err != nil { return fmt.Errorf("error adding route from host to FC: %w", err) @@ -240,6 +223,11 @@ func (s *Slot) CreateNetwork() error { func (s *Slot) RemoveNetwork() error { var errs []error + err := s.CloseFirewall() + if err != nil { + errs = append(errs, fmt.Errorf("error closing firewall: %w", err)) + } + tables, err := iptables.New() if err != nil { errs = append(errs, fmt.Errorf("error initializing iptables: %w", err)) @@ -263,17 +251,12 @@ func (s *Slot) RemoveNetwork() error { } // Delete routing from host to FC namespace - _, ipNet, err := net.ParseCIDR(s.HostCIDR()) + err = netlink.RouteDel(&netlink.Route{ + Gw: s.VpeerIP(), + Dst: s.HostNet(), + }) if err != nil { - errs = append(errs, fmt.Errorf("error parsing host snapshot CIDR: %w", err)) - } else { - err = netlink.RouteDel(&netlink.Route{ - Gw: net.ParseIP(s.VpeerIP()), - Dst: ipNet, - }) - if err != nil { - errs = append(errs, fmt.Errorf("error deleting route from host to FC: %w", err)) - } + 
errs = append(errs, fmt.Errorf("error deleting route from host to FC: %w", err)) } // Delete veth device diff --git a/packages/orchestrator/internal/sandbox/network/network_other.go b/packages/orchestrator/internal/sandbox/network/network_other.go new file mode 100644 index 0000000..87717a8 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/network_other.go @@ -0,0 +1,16 @@ +//go:build !linux +// +build !linux + +package network + +import ( + "errors" +) + +func (s *Slot) CreateNetwork() error { + return errors.New("platform does not support network creation") +} + +func (s *Slot) RemoveNetwork() error { + return errors.New("platform does not support network removal") +} diff --git a/packages/orchestrator/internal/sandbox/network/pool.go b/packages/orchestrator/internal/sandbox/network/pool.go index 162effd..1be5639 100644 --- a/packages/orchestrator/internal/sandbox/network/pool.go +++ b/packages/orchestrator/internal/sandbox/network/pool.go @@ -4,12 +4,11 @@ import ( "context" "errors" "fmt" - "log" - "os" "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" - "github.com/e2b-dev/infra/packages/shared/pkg/meters" "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" ) @@ -22,26 +21,35 @@ type Pool struct { ctx context.Context cancel context.CancelFunc - newSlots chan Slot - reusedSlots chan Slot + newSlots chan *Slot + reusedSlots chan *Slot newSlotCounter metric.Int64UpDownCounter reusedSlotCounter metric.Int64UpDownCounter + + slotStorage Storage } -func NewPool(ctx context.Context, newSlotsPoolSize, reusedSlotsPoolSize int) (*Pool, error) { - newSlots := make(chan Slot, newSlotsPoolSize-1) - reusedSlots := make(chan Slot, reusedSlotsPoolSize) +func NewPool(ctx context.Context, meterProvider metric.MeterProvider, newSlotsPoolSize, reusedSlotsPoolSize int, clientID string, tracer trace.Tracer) (*Pool, error) { + newSlots := make(chan *Slot, newSlotsPoolSize-1) + reusedSlots := make(chan *Slot, 
reusedSlotsPoolSize) + + meter := meterProvider.Meter("orchestrator.network.pool") - newSlotCounter, err := meters.GetUpDownCounter(meters.NewNetworkSlotSPoolCounterMeterName) + newSlotCounter, err := telemetry.GetUpDownCounter(meter, telemetry.NewNetworkSlotSPoolCounterMeterName) if err != nil { return nil, fmt.Errorf("failed to create new slot counter: %w", err) } - reusedSlotsCounter, err := meters.GetUpDownCounter(meters.ReusedNetworkSlotSPoolCounterMeterName) + reusedSlotsCounter, err := telemetry.GetUpDownCounter(meter, telemetry.ReusedNetworkSlotSPoolCounterMeterName) if err != nil { return nil, fmt.Errorf("failed to create reused slot counter: %w", err) } + slotStorage, err := NewStorage(vrtSlotsSize, clientID, tracer) + if err != nil { + return nil, fmt.Errorf("failed to create slot storage: %w", err) + } + ctx, cancel := context.WithCancel(ctx) pool := &Pool{ newSlots: newSlots, @@ -50,27 +58,30 @@ func NewPool(ctx context.Context, newSlotsPoolSize, reusedSlotsPoolSize int) (*P reusedSlotCounter: reusedSlotsCounter, ctx: ctx, cancel: cancel, + slotStorage: slotStorage, } go func() { err := pool.populate(ctx) if err != nil { - log.Fatalf("error when populating network slot pool: %v\n", err) + zap.L().Fatal("error when populating network slot pool", zap.Error(err)) } + + zap.L().Info("network slot pool populate closed") }() return pool, nil } func (p *Pool) createNetworkSlot() (*Slot, error) { - ips, err := NewSlot() + ips, err := p.slotStorage.Acquire(p.ctx) if err != nil { return nil, fmt.Errorf("failed to create network: %w", err) } err = ips.CreateNetwork() if err != nil { - releaseErr := ips.Release() + releaseErr := p.slotStorage.Release(ips) err = errors.Join(err, releaseErr) return nil, fmt.Errorf("failed to create network: %w", err) @@ -80,50 +91,72 @@ func (p *Pool) createNetworkSlot() (*Slot, error) { } func (p *Pool) populate(ctx context.Context) error { + defer close(p.newSlots) + for { select { case <-ctx.Done(): - return ctx.Err() + // Do not 
return an error here, this is expected on close + return nil default: slot, err := p.createNetworkSlot() if err != nil { - fmt.Fprintf(os.Stderr, "[network slot pool]: failed to create network: %v\n", err) + zap.L().Error("[network slot pool]: failed to create network", zap.Error(err)) continue } p.newSlotCounter.Add(ctx, 1) - p.newSlots <- *slot + p.newSlots <- slot } } } -func (p *Pool) Get(ctx context.Context) (Slot, error) { +func (p *Pool) Get(ctx context.Context, tracer trace.Tracer, allowInternet bool) (*Slot, error) { + var slot *Slot + select { - case slot := <-p.reusedSlots: + case s := <-p.reusedSlots: p.reusedSlotCounter.Add(ctx, -1) telemetry.ReportEvent(ctx, "reused network slot") - return slot, nil + slot = s default: select { case <-ctx.Done(): - return Slot{}, ctx.Err() - case slot := <-p.newSlots: + return nil, ctx.Err() + case s := <-p.newSlots: p.newSlotCounter.Add(ctx, -1) telemetry.ReportEvent(ctx, "new network slot") - return slot, nil + slot = s } } + + err := slot.ConfigureInternet(ctx, tracer, allowInternet) + if err != nil { + return nil, fmt.Errorf("error setting slot internet access: %w", err) + } + + return slot, nil } -func (p *Pool) Return(slot Slot) error { +func (p *Pool) Return(ctx context.Context, tracer trace.Tracer, slot *Slot) error { + err := slot.ResetInternet(ctx, tracer) + if err != nil { + // Cleanup the slot if resetting internet fails + if cerr := p.cleanup(slot); cerr != nil { + return fmt.Errorf("reset internet: %v; cleanup: %w", err, cerr) + } + + return fmt.Errorf("error resetting slot internet access: %w", err) + } + select { case p.reusedSlots <- slot: p.reusedSlotCounter.Add(context.Background(), 1) default: - err := cleanup(slot) + err := p.cleanup(slot) if err != nil { return fmt.Errorf("failed to return slot '%d': %w", slot.Idx, err) } @@ -132,7 +165,7 @@ func (p *Pool) Return(slot Slot) error { return nil } -func cleanup(slot Slot) error { +func (p *Pool) cleanup(slot *Slot) error { var errs []error err := 
slot.RemoveNetwork() @@ -140,7 +173,7 @@ func cleanup(slot Slot) error { errs = append(errs, fmt.Errorf("cannot remove network when releasing slot '%d': %w", slot.Idx, err)) } - err = slot.Release() + err = p.slotStorage.Release(slot) if err != nil { errs = append(errs, fmt.Errorf("failed to release slot '%d': %w", slot.Idx, err)) } @@ -148,18 +181,21 @@ func cleanup(slot Slot) error { return errors.Join(errs...) } -func (p *Pool) Close() error { +func (p *Pool) Close(_ context.Context) error { p.cancel() + zap.L().Info("Closing network pool") + for slot := range p.newSlots { - err := cleanup(slot) + err := p.cleanup(slot) if err != nil { return fmt.Errorf("failed to cleanup slot '%d': %w", slot.Idx, err) } } + close(p.reusedSlots) for slot := range p.reusedSlots { - err := cleanup(slot) + err := p.cleanup(slot) if err != nil { return fmt.Errorf("failed to cleanup slot '%d': %w", slot.Idx, err) } diff --git a/packages/orchestrator/internal/sandbox/network/slot.go b/packages/orchestrator/internal/sandbox/network/slot.go index 594523b..ad888ec 100644 --- a/packages/orchestrator/internal/sandbox/network/slot.go +++ b/packages/orchestrator/internal/sandbox/network/slot.go @@ -1,85 +1,173 @@ package network import ( + "context" "fmt" - "math/rand" - "slices" + "log" + "net" + "path/filepath" + "sync/atomic" - consulApi "github.com/hashicorp/consul/api" + "github.com/containernetworking/plugins/pkg/ns" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + netutils "k8s.io/utils/net" - "github.com/e2b-dev/infra/packages/orchestrator/internal/consul" + "github.com/e2b-dev/infra/packages/shared/pkg/env" ) -// We are using a more debuggable IP address allocation for now that only covers 255 addresses. const ( - octetSize = 256 - octetMax = octetSize - 1 - // This is the maximum number of IP addresses that can be allocated. 
- slotsSize = octetSize * octetSize - - hostMask = 32 - vMask = 30 - tapMask = 30 + defaultHostNetworkCIDR = "10.11.0.0/16" + defaultVrtNetworkCIDR = "10.12.0.0/16" + + hostMask = 32 + vrtMask = 31 // 2 usable ips per block (vpeer and veth) + vrtAddressPerSlot = 1 << (32 - vrtMask) // vrt addresses per slot (vpeer and veth) + + tapMask = 30 + tapInterfaceName = "tap0" + tapIp = "169.254.0.22" + tapMAC = "02:FC:00:00:00:05" +) + +var ( + hostNetworkCIDR = getHostNetworkCIDR() + vrtNetworkCIDR = getVrtNetworkCIDR() + vrtSlotsSize = GetVrtSlotsSize() ) +// Slot network allocation +// +// For each slot, we allocate three IP addresses: +// Host IP - used to access the sandbox from the host machine +// Vpeer and Veth IPs - used by the sandbox to communicate with the host +// +// Host default namespace creates a /16 CIDR block for the host IPs. +// Slot with Idx 1 will receive 10.11.0.1 and so on. Its allocated incrementally by slot Idx. +// Host mask is /32 because we only use one IP per slot. +// +// Vrt addresses (vpeer and veth) are allocated from a /31 CIDR block so we can use CIDR for network link routing. +// By default, they are using 10.12.0.0/16 CIDR block, that can be configured via environment variable. +// Vpeer receives the first IP in the block, and Veth receives the second IP. Block is calculated as (slot index * addresses per slot allocation). +// Vrt address per slot is always 2, so we can allocate /31 CIDR block for each slot. type Slot struct { Key string Idx int -} -func (s *Slot) VpeerName() string { - return "eth0" -} + Firewall *Firewall + + // firewallCustomRules is used to track if custom firewall rules are set for the slot and need a cleanup. 
+ firewallCustomRules atomic.Bool -func (s *Slot) getOctets() (int, int) { - rem := s.Idx % octetSize - octet := (s.Idx - rem) / octetSize + vPeerIp net.IP + vEthIp net.IP + vrtMask net.IPMask - return octet, rem + tapIp net.IP + tapMask net.IPMask + + // HostIP is IP address for the sandbox from the host machine. + // You can use it to make requests to the sandbox. + hostIp net.IP + hostNet *net.IPNet + hostCIDR string } -func (s *Slot) VpeerIP() string { - firstOctet, secondOctet := s.getOctets() +func NewSlot(key string, idx int) (*Slot, error) { + if idx < 1 || idx > vrtSlotsSize { + return nil, fmt.Errorf("slot index %d is out of range [1, %d)", idx, vrtSlotsSize) + } + + vEthIp, err := netutils.GetIndexedIP(vrtNetworkCIDR, idx*vrtAddressPerSlot) + if err != nil { + return nil, fmt.Errorf("failed to get veth indexed IP: %w", err) + } + + vPeerIp, err := netutils.GetIndexedIP(vrtNetworkCIDR, idx*vrtAddressPerSlot+1) + if err != nil { + return nil, fmt.Errorf("failed to get vpeer indexed IP: %w", err) + } + + vrtCIDR := fmt.Sprintf("%s/%d", vPeerIp.String(), vrtMask) + _, vrtNet, err := net.ParseCIDR(vrtCIDR) + if err != nil { + return nil, fmt.Errorf("failed to parse vrt CIDR: %w", err) + } + + hostIp, err := netutils.GetIndexedIP(hostNetworkCIDR, idx) + if err != nil { + return nil, fmt.Errorf("failed to get host IP: %w", err) + } + + hostCIDR := fmt.Sprintf("%s/%d", hostIp.String(), hostMask) + _, hostNet, err := net.ParseCIDR(hostCIDR) + if err != nil { + return nil, fmt.Errorf("failed to parse host CIDR: %w", err) + } + + tapCIDR := fmt.Sprintf("%s/%d", tapIp, tapMask) + tapIp, tapNet, err := net.ParseCIDR(tapCIDR) + if err != nil { + return nil, fmt.Errorf("failed to parse tap CIDR: %w", err) + } + + slot := &Slot{ + Key: key, + Idx: idx, + + vPeerIp: vPeerIp, + vEthIp: vEthIp, + vrtMask: vrtNet.Mask, + + tapIp: tapIp, + tapMask: tapNet.Mask, + + hostIp: hostIp, + hostNet: hostNet, + hostCIDR: hostCIDR, + } - return fmt.Sprintf("10.%d.%d.2", firstOctet, 
secondOctet) + return slot, nil } -func (s *Slot) VethIP() string { - firstOctet, secondOctet := s.getOctets() +func (s *Slot) VpeerName() string { + return "eth0" +} - return fmt.Sprintf("10.%d.%d.1", firstOctet, secondOctet) +func (s *Slot) VpeerIP() net.IP { + return s.vPeerIp } -func (s *Slot) VMask() int { - return vMask +func (s *Slot) VethIP() net.IP { + return s.vEthIp } func (s *Slot) VethName() string { return fmt.Sprintf("veth-%d", s.Idx) } -func (s *Slot) VethCIDR() string { - return fmt.Sprintf("%s/%d", s.VethIP(), s.VMask()) +func (s *Slot) VrtMask() net.IPMask { + return s.vrtMask } -func (s *Slot) VpeerCIDR() string { - return fmt.Sprintf("%s/%d", s.VpeerIP(), s.VMask()) +func (s *Slot) HostIP() net.IP { + return s.hostIp } -func (s *Slot) HostCIDR() string { - return fmt.Sprintf("%s/%d", s.HostIP(), s.HostMask()) +func (s *Slot) HostIPString() string { + return s.HostIP().String() } -func (s *Slot) HostMask() int { - return hostMask +func (s *Slot) HostMask() net.IPMask { + return s.hostNet.Mask } -// IP address for the sandbox from the host machine. -// You can use it to make requests to the sandbox. 
-func (s *Slot) HostIP() string { - firstOctet, secondOctet := s.getOctets() +func (s *Slot) HostNet() *net.IPNet { + return s.hostNet +} - return fmt.Sprintf("192.168.%d.%d", firstOctet, secondOctet) +func (s *Slot) HostCIDR() string { + return s.hostCIDR } func (s *Slot) NamespaceIP() string { @@ -91,124 +179,162 @@ func (s *Slot) NamespaceID() string { } func (s *Slot) TapName() string { - return "tap0" + return tapInterfaceName } -func (s *Slot) TapIP() string { - return "169.254.0.22" +func (s *Slot) TapIP() net.IP { + return s.tapIp +} + +func (s *Slot) TapIPString() string { + return s.tapIp.String() } func (s *Slot) TapMask() int { return tapMask } -func (s *Slot) TapCIDR() string { - return fmt.Sprintf("%s/%d", s.TapIP(), s.TapMask()) +func (s *Slot) TapMaskString() string { + mask := net.CIDRMask(s.TapMask(), 32) + return net.IP(mask).String() } -func NewSlot() (*Slot, error) { - kv := consul.Client.KV() - - var slot *Slot - - trySlot := func(slotIdx int, key string) (*Slot, error) { - status, _, err := kv.CAS(&consulApi.KVPair{ - Key: key, - ModifyIndex: 0, - }, nil) - if err != nil { - return nil, fmt.Errorf("failed to write to Consul KV: %w", err) - } +func (s *Slot) TapCIDR() net.IPMask { + return s.tapMask +} - if status { - return &Slot{ - Idx: slotIdx, - Key: key, - }, nil - } +func (s *Slot) TapMAC() string { + return tapMAC +} - return nil, nil +func (s *Slot) InitializeFirewall() error { + if s.Firewall != nil { + return fmt.Errorf("firewall is already initialized for slot %s", s.Key) } - for randomTry := 1; randomTry <= 10; randomTry++ { - slotIdx := rand.Intn(slotsSize) - key := getKVKey(slotIdx) + fw, err := NewFirewall(s.TapName()) + if err != nil { + return fmt.Errorf("error initializing firewall: %w", err) + } + s.Firewall = fw - maybeSlot, err := trySlot(slotIdx, key) - if err != nil { - return nil, err - } + return nil +} - if maybeSlot != nil { - slot = maybeSlot +func (s *Slot) CloseFirewall() error { + if s.Firewall == nil { + return 
nil + } - break - } + if err := s.Firewall.Close(); err != nil { + return fmt.Errorf("error closing firewall: %w", err) } + s.Firewall = nil - if slot == nil { - // This is a fallback for the case when all slots are taken. - // There is no Consul lock so it's possible that multiple sandboxes will try to acquire the same slot. - // In this case, only one of them will succeed and other will try with different slots. - reservedKeys, _, keysErr := kv.Keys(consul.ClientID+"/", "", nil) - if keysErr != nil { - return nil, fmt.Errorf("failed to read Consul KV: %w", keysErr) - } + return nil +} - for slotIdx := 0; slotIdx < slotsSize; slotIdx++ { - key := getKVKey(slotIdx) +func (s *Slot) ConfigureInternet(ctx context.Context, tracer trace.Tracer, allowInternet bool) (e error) { + _, span := tracer.Start(ctx, "slot-internet-configure", trace.WithAttributes( + attribute.String("namespace_id", s.NamespaceID()), + attribute.Bool("allow_internet", allowInternet), + )) + defer span.End() - if slices.Contains(reservedKeys, key) { - continue - } + if allowInternet { + // Internet access is allowed by default. 
+ return nil + } - maybeSlot, err := trySlot(slotIdx, key) - if err != nil { - return nil, err - } + s.firewallCustomRules.Store(true) - if maybeSlot != nil { - slot = maybeSlot + n, err := ns.GetNS(filepath.Join(netNamespacesDir, s.NamespaceID())) + if err != nil { + return fmt.Errorf("failed to get slot network namespace '%s': %w", s.NamespaceID(), err) + } + defer n.Close() - break - } + err = n.Do(func(_ ns.NetNS) error { + err = s.Firewall.AddBlockedIP("0.0.0.0/0") + if err != nil { + return fmt.Errorf("error setting firewall rules: %w", err) } - } - if slot == nil { - return nil, fmt.Errorf("failed to acquire IP slot: no empty slots found") + return nil + }) + if err != nil { + return fmt.Errorf("failed execution in network namespace '%s': %w", s.NamespaceID(), err) } - return slot, nil + return nil } -func (ips *Slot) Release() error { - kv := consul.Client.KV() +func (s *Slot) ResetInternet(ctx context.Context, tracer trace.Tracer) error { + _, span := tracer.Start(ctx, "slot-internet-reset", trace.WithAttributes( + attribute.String("namespace_id", s.NamespaceID()), + )) + defer span.End() - pair, _, err := kv.Get(ips.Key, nil) + if !s.firewallCustomRules.CompareAndSwap(true, false) { + return nil + } + + n, err := ns.GetNS(filepath.Join(netNamespacesDir, s.NamespaceID())) if err != nil { - return fmt.Errorf("failed to release IPSlot: Failed to read Consul KV: %w", err) + return fmt.Errorf("failed to get slot network namespace '%s': %w", s.NamespaceID(), err) } + defer n.Close() - if pair == nil { - return fmt.Errorf("IP slot %d was already released", ips.Idx) + err = n.Do(func(_ ns.NetNS) error { + err := s.Firewall.ResetAllCustom() + if err != nil { + return fmt.Errorf("error cleaning firewall rules: %w", err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("failed execution in network namespace '%s': %w", s.NamespaceID(), err) } - status, _, err := kv.DeleteCAS(&consulApi.KVPair{ - Key: ips.Key, - ModifyIndex: pair.ModifyIndex, - }, nil) + 
return nil +} + +func getHostNetworkCIDR() *net.IPNet { + cidr := env.GetEnv("SANDBOXES_HOST_NETWORK_CIDR", defaultHostNetworkCIDR) + + _, subnet, err := net.ParseCIDR(cidr) if err != nil { - return fmt.Errorf("failed to release IPSlot: Failed to delete slot from Consul KV: %w", err) + log.Fatalf("Failed to parse network CIDR %s: %v", cidr, err) } - if !status { - return fmt.Errorf("IP slot '%d' for was already realocated", ips.Idx) + log.Println("Using host network cidr", "cidr", cidr) + return subnet +} + +func getVrtNetworkCIDR() *net.IPNet { + cidr := env.GetEnv("SANDBOXES_VRT_NETWORK_CIDR", defaultVrtNetworkCIDR) + + _, subnet, err := net.ParseCIDR(cidr) + if err != nil { + log.Fatalf("Failed to parse network CIDR %s: %v", cidr, err) } - return nil + log.Printf("Using vrt network cidr %s", cidr) + return subnet } -func getKVKey(slotIdx int) string { - return fmt.Sprintf("%s/%d", consul.ClientID, slotIdx) +func GetVrtSlotsSize() int { + ones, _ := getVrtNetworkCIDR().Mask.Size() + + // total IPs in the CIDR block + totalIPs := 1 << (32 - ones) + + // total slots that we can allocate + // we need to divide total IPs by number of addresses per slot (vpeer and veth) + // then we subtract the number of addresses so it will not overflow, because we are adding them incrementally by slot index + totalSlots := (totalIPs / vrtAddressPerSlot) - vrtAddressPerSlot + + log.Printf("Using network slot size: %d", totalSlots) + return totalSlots } diff --git a/packages/orchestrator/internal/sandbox/network/storage.go b/packages/orchestrator/internal/sandbox/network/storage.go new file mode 100644 index 0000000..b9facb8 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/storage.go @@ -0,0 +1,26 @@ +package network + +import ( + "context" + "os" + + "go.opentelemetry.io/otel/trace" + + "github.com/e2b-dev/infra/packages/shared/pkg/env" +) + +var localNamespaceStorageSwitch = os.Getenv("USE_LOCAL_NAMESPACE_STORAGE") + +type Storage interface { + Acquire(ctx 
context.Context) (*Slot, error) + Release(*Slot) error +} + +// NewStorage creates a new slot storage based on the environment, we are ok with using a memory storage for local +func NewStorage(slotsSize int, clientID string, tracer trace.Tracer) (Storage, error) { + if env.IsDevelopment() || localNamespaceStorageSwitch == "true" { + return NewStorageLocal(slotsSize, tracer) + } + + return NewStorageKV(slotsSize, clientID) +} diff --git a/packages/orchestrator/internal/sandbox/network/storage_kv.go b/packages/orchestrator/internal/sandbox/network/storage_kv.go new file mode 100644 index 0000000..a5a023e3 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/storage_kv.go @@ -0,0 +1,149 @@ +package network + +import ( + "context" + "fmt" + "math/rand" + "slices" + + consulApi "github.com/hashicorp/consul/api" + + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +type StorageKV struct { + slotsSize int + consulClient *consulApi.Client + clientID string +} + +func (s *StorageKV) getKVKey(slotIdx int) string { + return fmt.Sprintf("%s/%d", s.clientID, slotIdx) +} + +func NewStorageKV(slotsSize int, clientID string) (*StorageKV, error) { + consulToken := utils.RequiredEnv("CONSUL_TOKEN", "Consul token for authenticating requests to the Consul API") + + consulClient, err := newConsulClient(consulToken) + if err != nil { + return nil, fmt.Errorf("failed to init StorageKV consul client: %w", err) + } + + return &StorageKV{ + slotsSize: slotsSize, + consulClient: consulClient, + clientID: clientID, + }, nil +} + +func newConsulClient(token string) (*consulApi.Client, error) { + config := consulApi.DefaultConfig() + config.Token = token + + consulClient, err := consulApi.NewClient(config) + if err != nil { + return nil, fmt.Errorf("failed to initialize Consul client: %w", err) + } + + return consulClient, nil +} + +func (s *StorageKV) Acquire(_ context.Context) (*Slot, error) { + kv := s.consulClient.KV() + + var slot *Slot + + trySlot := func(slotIdx 
int, key string) (*Slot, error) { + status, _, err := kv.CAS(&consulApi.KVPair{ + Key: key, + ModifyIndex: 0, + }, nil) + if err != nil { + return nil, fmt.Errorf("failed to write to Consul KV: %w", err) + } + + if status { + return NewSlot(key, slotIdx) + } + + return nil, nil + } + + for randomTry := 1; randomTry <= 10; randomTry++ { + slotIdx := rand.Intn(s.slotsSize) + key := s.getKVKey(slotIdx) + + maybeSlot, err := trySlot(slotIdx, key) + if err != nil { + return nil, err + } + + if maybeSlot != nil { + slot = maybeSlot + + break + } + } + + if slot == nil { + // This is a fallback for the case when all slots are taken. + // There is no Consul lock so it's possible that multiple sandboxes will try to acquire the same slot. + // In this case, only one of them will succeed and other will try with different slots. + reservedKeys, _, keysErr := kv.Keys(s.clientID+"/", "", nil) + if keysErr != nil { + return nil, fmt.Errorf("failed to read Consul KV: %w", keysErr) + } + + for slotIdx := 0; slotIdx < s.slotsSize; slotIdx++ { + key := s.getKVKey(slotIdx) + + if slices.Contains(reservedKeys, key) { + continue + } + + maybeSlot, err := trySlot(slotIdx, key) + if err != nil { + return nil, err + } + + if maybeSlot != nil { + slot = maybeSlot + + break + } + } + } + + if slot == nil { + return nil, fmt.Errorf("failed to acquire IP slot: no empty slots found") + } + + return slot, nil +} + +func (s *StorageKV) Release(ips *Slot) error { + kv := s.consulClient.KV() + + pair, _, err := kv.Get(ips.Key, nil) + if err != nil { + return fmt.Errorf("failed to release IPSlot: Failed to read Consul KV: %w", err) + } + + if pair == nil { + return fmt.Errorf("IP slot %d was already released", ips.Idx) + } + + status, _, err := kv.DeleteCAS(&consulApi.KVPair{ + Key: ips.Key, + ModifyIndex: pair.ModifyIndex, + }, nil) + if err != nil { + return fmt.Errorf("failed to release IPSlot: Failed to delete slot from Consul KV: %w", err) + } + + if !status { + return fmt.Errorf("IP slot '%d' 
for was already realocated", ips.Idx) + } + + return nil +} diff --git a/packages/orchestrator/internal/sandbox/network/storage_local.go b/packages/orchestrator/internal/sandbox/network/storage_local.go new file mode 100644 index 0000000..bc21502 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/storage_local.go @@ -0,0 +1,165 @@ +package network + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +type StorageLocal struct { + slotsSize int + foreignNs map[string]struct{} + acquiredNs map[string]struct{} + acquiredNsMu sync.Mutex + tracer trace.Tracer +} + +const netNamespacesDir = "/var/run/netns" + +func NewStorageLocal(slotsSize int, tracer trace.Tracer) (*StorageLocal, error) { + // get namespaces that we want to always skip + foreignNs, err := getForeignNamespaces() + if err != nil { + return nil, fmt.Errorf("error getting already used namespaces: %v", err) + } + + foreignNsMap := make(map[string]struct{}) + for _, ns := range foreignNs { + foreignNsMap[ns] = struct{}{} + zap.L().Info(fmt.Sprintf("Found foreign namespace: %s", ns)) + } + + return &StorageLocal{ + foreignNs: foreignNsMap, + slotsSize: slotsSize, + acquiredNs: make(map[string]struct{}, slotsSize), + acquiredNsMu: sync.Mutex{}, + tracer: tracer, + }, nil +} + +func (s *StorageLocal) Acquire(ctx context.Context) (*Slot, error) { + spanCtx, span := s.tracer.Start(ctx, "network-namespace-acquire") + defer span.End() + + acquireTimeoutCtx, acquireCancel := context.WithTimeout(spanCtx, time.Millisecond*500) + defer acquireCancel() + + s.acquiredNsMu.Lock() + defer s.acquiredNsMu.Unlock() + + // we skip the first slot because it's the host slot + slotIdx := 1 + + for { + select { + case <-acquireTimeoutCtx.Done(): + return nil, fmt.Errorf("failed to acquire IP slot: timeout") + default: + if len(s.acquiredNs) > s.slotsSize { + return nil, fmt.Errorf("failed to acquire IP slot: no empty 
slots found") + } + + slotIdx++ + slotName := getSlotName(slotIdx) + + // skip the slot if it's already in use by foreign program + if _, found := s.foreignNs[slotName]; found { + continue + } + + // skip the slot if it's already acquired + if _, found := s.acquiredNs[slotName]; found { + continue + } + + // check if the slot can be acquired + available, err := isNamespaceAvailable(slotName) + if err != nil { + return nil, fmt.Errorf("error checking if namespace is available: %v", err) + } + + if !available { + s.foreignNs[slotName] = struct{}{} + zap.L().Debug("Skipping slot because not available", zap.String("slot", slotName)) + continue + } + + s.acquiredNs[slotName] = struct{}{} + slotKey := getLocalKey(slotIdx) + + return NewSlot(slotKey, slotIdx) + } + } +} + +func (s *StorageLocal) Release(ips *Slot) error { + s.acquiredNsMu.Lock() + defer s.acquiredNsMu.Unlock() + + slotName := getSlotName(ips.Idx) + delete(s.acquiredNs, slotName) + + return nil +} + +func isNamespaceAvailable(name string) (bool, error) { + nsPath := filepath.Join(netNamespacesDir, name) + _, err := os.Stat(nsPath) + + if os.IsNotExist(err) { + // Namespace does not exist, so it's available + return true, nil + } else if err != nil { + // Some other error + return false, err + } + + // File exists so namespace is in use. 
+ return false, nil +} + +func getForeignNamespaces() ([]string, error) { + var ns []string + + files, err := os.ReadDir(netNamespacesDir) + if err != nil { + // Folder does not exist, so we can assume no namespaces are in use + if os.IsNotExist(err) { + return ns, nil + } + + return nil, fmt.Errorf("error reading netns directory: %v", err) + } + + for _, file := range files { + if file.IsDir() { + continue + } + + name := file.Name() + if name == "host" { + continue + } + + ns = append(ns, name) + } + + return ns, nil +} + +func getSlotName(slotIdx int) string { + slotIdxStr := strconv.Itoa(slotIdx) + return fmt.Sprintf("ns-%s", slotIdxStr) +} + +func getLocalKey(slotIdx int) string { + return strconv.Itoa(slotIdx) +} diff --git a/packages/orchestrator/internal/sandbox/network/storage_memory.go b/packages/orchestrator/internal/sandbox/network/storage_memory.go new file mode 100644 index 0000000..a06adc2 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/network/storage_memory.go @@ -0,0 +1,52 @@ +package network + +import ( + "context" + "fmt" + "strconv" + "sync" +) + +type StorageMemory struct { + slotsSize int + freeSlots []bool + freeSlotsMu sync.Mutex +} + +func NewStorageMemory(slotsSize int) (*StorageMemory, error) { + return &StorageMemory{ + slotsSize: slotsSize, + freeSlots: make([]bool, slotsSize), + freeSlotsMu: sync.Mutex{}, + }, nil +} + +func (s *StorageMemory) Acquire(_ context.Context) (*Slot, error) { + s.freeSlotsMu.Lock() + defer s.freeSlotsMu.Unlock() + + // Simple slot tracking in memory + // We skip the first slot because it's the host slot + for slotIdx := 1; slotIdx < s.slotsSize; slotIdx++ { + key := getMemoryKey(slotIdx) + if !s.freeSlots[slotIdx] { + s.freeSlots[slotIdx] = true + return NewSlot(key, slotIdx) + } + } + + return nil, fmt.Errorf("failed to acquire IP slot: no empty slots found") +} + +func (s *StorageMemory) Release(ips *Slot) error { + s.freeSlotsMu.Lock() + defer s.freeSlotsMu.Unlock() + + s.freeSlots[ips.Idx] = 
false + + return nil +} + +func getMemoryKey(slotIdx int) string { + return strconv.Itoa(slotIdx) +} diff --git a/packages/orchestrator/internal/sandbox/rootfs/cow.go b/packages/orchestrator/internal/sandbox/rootfs/cow.go deleted file mode 100644 index fdd11bf..0000000 --- a/packages/orchestrator/internal/sandbox/rootfs/cow.go +++ /dev/null @@ -1,151 +0,0 @@ -package rootfs - -import ( - "context" - "errors" - "fmt" - "io" - "log" - "time" - - "github.com/bits-and-blooms/bitset" - - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" - "github.com/e2b-dev/infra/packages/shared/pkg/utils" -) - -type CowDevice struct { - overlay *block.Overlay - mnt *nbd.DirectPathMount - - ready *utils.SetOnce[string] - - blockSize int64 - BaseBuildId string - - finishedOperations chan struct{} -} - -func NewCowDevice(rootfs *template.Storage, cachePath string, blockSize int64) (*CowDevice, error) { - size, err := rootfs.Size() - if err != nil { - return nil, fmt.Errorf("error getting device size: %w", err) - } - - cache, err := block.NewCache(size, blockSize, cachePath, false) - if err != nil { - return nil, fmt.Errorf("error creating cache: %w", err) - } - - overlay := block.NewOverlay(rootfs, cache, blockSize) - - mnt := nbd.NewDirectPathMount(overlay) - - return &CowDevice{ - mnt: mnt, - overlay: overlay, - ready: utils.NewSetOnce[string](), - blockSize: blockSize, - finishedOperations: make(chan struct{}, 1), - BaseBuildId: rootfs.Header().Metadata.BaseBuildId.String(), - }, nil -} - -func (o *CowDevice) Start(ctx context.Context) error { - deviceIndex, err := o.mnt.Open(ctx) - if err != nil { - return o.ready.SetError(fmt.Errorf("error opening overlay file: %w", err)) - } - - return o.ready.SetValue(nbd.GetDevicePath(deviceIndex)) -} - -func (o *CowDevice) Export(ctx context.Context, out io.Writer, stopSandbox 
func() error) (*bitset.BitSet, error) { - cache, err := o.overlay.EjectCache() - if err != nil { - return nil, fmt.Errorf("error ejecting cache: %w", err) - } - - // the error is already logged in go routine in SandboxCreate handler - go stopSandbox() - - select { - case <-o.finishedOperations: - break - case <-ctx.Done(): - return nil, fmt.Errorf("timeout waiting for overlay device to be released") - } - - dirty, err := cache.Export(out) - if err != nil { - return nil, fmt.Errorf("error exporting cache: %w", err) - } - - err = cache.Close() - if err != nil { - return nil, fmt.Errorf("error closing cache: %w", err) - } - - return dirty, nil -} - -func (o *CowDevice) Close() error { - var errs []error - - err := o.mnt.Close() - if err != nil { - errs = append(errs, fmt.Errorf("error closing overlay mount: %w", err)) - } - - o.finishedOperations <- struct{}{} - - err = o.overlay.Close() - if err != nil { - errs = append(errs, fmt.Errorf("error closing overlay cache: %w", err)) - } - - devicePath, err := o.ready.Wait() - if err != nil { - errs = append(errs, fmt.Errorf("error getting overlay path: %w", err)) - - return errors.Join(errs...) - } - - slot, err := nbd.GetDeviceSlot(devicePath) - if err != nil { - errs = append(errs, fmt.Errorf("error getting overlay slot: %w", err)) - - return errors.Join(errs...) 
- } - - counter := 0 - for { - counter++ - err := nbd.Pool.ReleaseDevice(slot) - if errors.Is(err, nbd.ErrDeviceInUse{}) { - if counter%100 == 0 { - log.Printf("[%dth try] error releasing overlay device: %v\n", counter, err) - } - - time.Sleep(500 * time.Millisecond) - - continue - } - - if err != nil { - return fmt.Errorf("error releasing overlay device: %w", err) - } - - break - } - - fmt.Printf("overlay device released\n") - - return nil -} - -func (o *CowDevice) Path() (string, error) { - return o.ready.Wait() -} diff --git a/packages/orchestrator/internal/sandbox/rootfs/direct.go b/packages/orchestrator/internal/sandbox/rootfs/direct.go new file mode 100644 index 0000000..d67068c --- /dev/null +++ b/packages/orchestrator/internal/sandbox/rootfs/direct.go @@ -0,0 +1,109 @@ +package rootfs + +import ( + "context" + "fmt" + "io" + "sync/atomic" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +type DirectProvider struct { + tracer trace.Tracer + + cache *block.Cache + path string + + // TODO: Remove when the snapshot flow is improved + finishedOperations chan struct{} + // TODO: Remove when the snapshot flow is improved + exporting atomic.Bool +} + +func NewDirectProvider(tracer trace.Tracer, rootfs block.ReadonlyDevice, path string) (Provider, error) { + size, err := rootfs.Size() + if err != nil { + return nil, fmt.Errorf("error getting device size: %w", err) + } + + blockSize := rootfs.BlockSize() + + cache, err := block.NewCache(size, blockSize, path, true) + if err != nil { + return nil, fmt.Errorf("error creating cache: %w", err) + } + + return &DirectProvider{ + tracer: tracer, + cache: cache, + path: path, + + finishedOperations: make(chan struct{}, 1), + }, nil +} + +func (o *DirectProvider) Start(_ context.Context) error { + return nil +} + 
+func (o *DirectProvider) ExportDiff( + ctx context.Context, + out io.Writer, + stopSandbox func(context.Context) error, +) (*header.DiffMetadata, error) { + ctx, childSpan := o.tracer.Start(ctx, "direct-provider-export") + defer childSpan.End() + + o.exporting.CompareAndSwap(false, true) + + // the error is already logged in go routine in SandboxCreate handler + go func() { + err := stopSandbox(ctx) + if err != nil { + zap.L().Error("error stopping sandbox on cow export", zap.Error(err)) + } + }() + + select { + case <-o.finishedOperations: + break + case <-ctx.Done(): + return nil, fmt.Errorf("timeout waiting for overlay device to be released") + } + telemetry.ReportEvent(ctx, "sandbox stopped") + + o.cache.MarkAllAsDirty() + m, err := o.cache.ExportToDiff(out) + if err != nil { + return nil, fmt.Errorf("error exporting cache: %w", err) + } + + telemetry.ReportEvent(ctx, "cache exported") + + err = o.cache.Close() + if err != nil { + return nil, fmt.Errorf("error closing cache: %w", err) + } + + return m, nil +} + +func (o *DirectProvider) Close(_ context.Context) error { + o.finishedOperations <- struct{}{} + + if !o.exporting.CompareAndSwap(false, true) { + return nil + } + + return o.cache.Close() +} + +func (o *DirectProvider) Path() (string, error) { + return o.path, nil +} diff --git a/packages/orchestrator/internal/sandbox/rootfs/nbd.go b/packages/orchestrator/internal/sandbox/rootfs/nbd.go new file mode 100644 index 0000000..464ca52 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/rootfs/nbd.go @@ -0,0 +1,185 @@ +package rootfs + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "syscall" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "golang.org/x/sys/unix" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" + 
"github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +type NBDProvider struct { + overlay *block.Overlay + mnt *nbd.DirectPathMount + + ready *utils.SetOnce[string] + + blockSize int64 + + finishedOperations chan struct{} + devicePool *nbd.DevicePool + + tracer trace.Tracer +} + +func NewNBDProvider(tracer trace.Tracer, rootfs block.ReadonlyDevice, cachePath string, devicePool *nbd.DevicePool) (Provider, error) { + size, err := rootfs.Size() + if err != nil { + return nil, fmt.Errorf("error getting device size: %w", err) + } + + blockSize := rootfs.BlockSize() + + cache, err := block.NewCache(size, blockSize, cachePath, false) + if err != nil { + return nil, fmt.Errorf("error creating cache: %w", err) + } + + overlay := block.NewOverlay(rootfs, cache, blockSize) + + mnt := nbd.NewDirectPathMount(tracer, overlay, devicePool) + + return &NBDProvider{ + tracer: tracer, + mnt: mnt, + overlay: overlay, + ready: utils.NewSetOnce[string](), + finishedOperations: make(chan struct{}, 1), + blockSize: blockSize, + devicePool: devicePool, + }, nil +} + +func (o *NBDProvider) Start(ctx context.Context) error { + deviceIndex, err := o.mnt.Open(ctx) + if err != nil { + return o.ready.SetError(fmt.Errorf("error opening overlay file: %w", err)) + } + + return o.ready.SetValue(nbd.GetDevicePath(deviceIndex)) +} + +func (o *NBDProvider) ExportDiff( + parentCtx context.Context, + out io.Writer, + stopSandbox func(ctx context.Context) error, +) (*header.DiffMetadata, error) { + childCtx, childSpan := o.tracer.Start(parentCtx, "cow-export") + defer childSpan.End() + + cache, err := o.overlay.EjectCache() + if err != nil { + return nil, fmt.Errorf("error ejecting cache: %w", err) + } + + // the error is already logged in go routine in SandboxCreate handler + go func() { + err := stopSandbox(childCtx) + if err != nil { + zap.L().Error("error stopping sandbox on cow export", zap.Error(err)) + } + }() + + select { + case <-o.finishedOperations: + break + case <-childCtx.Done(): + 
return nil, fmt.Errorf("timeout waiting for overlay device to be released") + } + telemetry.ReportEvent(childCtx, "sandbox stopped") + + m, err := cache.ExportToDiff(out) + if err != nil { + return nil, fmt.Errorf("error exporting cache: %w", err) + } + + telemetry.ReportEvent(childCtx, "cache exported") + + err = cache.Close() + if err != nil { + return nil, fmt.Errorf("error closing cache: %w", err) + } + + return m, nil +} + +func (o *NBDProvider) Close(ctx context.Context) error { + childCtx, childSpan := o.tracer.Start(ctx, "cow-close") + defer childSpan.End() + + var errs []error + + err := o.flush(childCtx) + if err != nil { + errs = append(errs, fmt.Errorf("error flushing cow device: %w", err)) + } + + err = o.mnt.Close(childCtx) + if err != nil { + errs = append(errs, fmt.Errorf("error closing overlay mount: %w", err)) + } + + o.finishedOperations <- struct{}{} + + err = o.overlay.Close() + if err != nil { + errs = append(errs, fmt.Errorf("error closing overlay cache: %w", err)) + } + + zap.L().Info("overlay device released") + + return errors.Join(errs...) +} + +func (o *NBDProvider) Path() (string, error) { + return o.ready.Wait() +} + +// flush flushes the data to the operating system's buffer. 
+func (o *NBDProvider) flush(ctx context.Context) error { + telemetry.ReportEvent(ctx, "flushing cow device") + defer telemetry.ReportEvent(ctx, "flushing cow done") + + nbdPath, err := o.Path() + if err != nil { + return fmt.Errorf("failed to get cow path: %w", err) + } + + file, err := os.Open(nbdPath) + if err != nil { + return fmt.Errorf("failed to open cow path: %w", err) + } + defer func() { + err := file.Close() + if err != nil { + zap.L().Error("failed to close nbd file", zap.Error(err)) + } + }() + + if err := unix.IoctlSetInt(int(file.Fd()), unix.BLKFLSBUF, 0); err != nil { + return fmt.Errorf("ioctl BLKFLSBUF failed: %w", err) + } + + err = syscall.Fsync(int(file.Fd())) + if err != nil { + return fmt.Errorf("failed to fsync cow path: %w", err) + } + + err = file.Sync() + if err != nil { + return fmt.Errorf("failed to sync cow path: %w", err) + } + + return nil +} diff --git a/packages/orchestrator/internal/sandbox/rootfs/rootfs.go b/packages/orchestrator/internal/sandbox/rootfs/rootfs.go new file mode 100644 index 0000000..4fa4402 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/rootfs/rootfs.go @@ -0,0 +1,15 @@ +package rootfs + +import ( + "context" + "io" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" +) + +type Provider interface { + Start(ctx context.Context) error + Close(ctx context.Context) error + Path() (string, error) + ExportDiff(ctx context.Context, out io.Writer, stopSandbox func(context.Context) error) (*header.DiffMetadata, error) +} diff --git a/packages/orchestrator/internal/sandbox/sandbox.go b/packages/orchestrator/internal/sandbox/sandbox.go index af05ea5..f645313 100644 --- a/packages/orchestrator/internal/sandbox/sandbox.go +++ b/packages/orchestrator/internal/sandbox/sandbox.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + package sandbox import ( @@ -6,70 +9,238 @@ import ( "fmt" "net/http" "os" - "syscall" "time" "github.com/google/uuid" "go.opentelemetry.io/otel/attribute" 
"go.opentelemetry.io/otel/trace" - "golang.org/x/mod/semver" - "golang.org/x/sys/unix" + "go.uber.org/zap" - "github.com/e2b-dev/infra/packages/orchestrator/internal/dns" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/fc" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/rootfs" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/uffd" + "github.com/e2b-dev/infra/packages/shared/pkg/env" "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" - "github.com/e2b-dev/infra/packages/shared/pkg/logs" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" "github.com/e2b-dev/infra/packages/shared/pkg/utils" ) +var defaultEnvdTimeout = utils.Must(time.ParseDuration(env.GetEnv("ENVD_TIMEOUT", "10s"))) + var httpClient = http.Client{ Timeout: 10 * time.Second, } +type Resources struct { + Slot *network.Slot + rootfs rootfs.Provider + memory uffd.MemoryBackend + uffdExit chan error +} + +type Metadata struct { + Config *orchestrator.SandboxConfig + StartedAt time.Time + EndAt time.Time +} + type Sandbox struct { + *Resources + *Metadata + files *storage.SandboxFiles cleanup *Cleanup process *fc.Process - uffd *uffd.Uffd - rootfs *rootfs.CowDevice - Config *orchestrator.SandboxConfig - StartedAt time.Time - EndAt time.Time + template template.Template - Slot network.Slot - Logger *logs.SandboxLogger + Checks *Checks +} - uffdExit chan error +func (m *Metadata) 
LoggerMetadata() sbxlogger.SandboxMetadata { + return sbxlogger.SandboxMetadata{ + SandboxID: m.Config.SandboxId, + TemplateID: m.Config.TemplateId, + TeamID: m.Config.TeamId, + } +} - template template.Template +type networkSlotRes struct { + slot *network.Slot + err error +} + +func CreateSandbox( + ctx context.Context, + tracer trace.Tracer, + networkPool *network.Pool, + _ *nbd.DevicePool, + config *orchestrator.SandboxConfig, + template template.Template, + sandboxTimeout time.Duration, + rootfsCachePath string, + processOptions fc.ProcessOptions, + allowInternet bool, +) (*Sandbox, *Cleanup, error) { + childCtx, childSpan := tracer.Start(ctx, "new-sandbox") + defer childSpan.End() + + cleanup := NewCleanup() + + ipsCh := getNetworkSlotAsync(childCtx, tracer, networkPool, cleanup, allowInternet) + defer func() { + // Ensure the slot is received from chan so the slot is cleaned up properly in cleanup + <-ipsCh + }() + + sandboxFiles := template.Files().NewSandboxFiles(config.SandboxId) + cleanup.Add(func(ctx context.Context) error { + filesErr := cleanupFiles(sandboxFiles) + if filesErr != nil { + return fmt.Errorf("failed to cleanup files: %w", filesErr) + } + + return nil + }) + + rootFS, err := template.Rootfs() + if err != nil { + return nil, cleanup, fmt.Errorf("failed to get rootfs: %w", err) + } - healthcheckCtx *utils.LockableCancelableContext + rootfsProvider, err := rootfs.NewDirectProvider( + tracer, + rootFS, + // Populate direct cache directly from the source file + // This is needed for marking all blocks as dirty and being able to read them directly + rootfsCachePath, + ) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to create rootfs overlay: %w", err) + } + cleanup.Add(func(ctx context.Context) error { + return rootfsProvider.Close(ctx) + }) + go func() { + runErr := rootfsProvider.Start(childCtx) + if runErr != nil { + zap.L().Error("rootfs overlay error", zap.Error(runErr)) + } + }() + + memfile, err := template.Memfile() + if 
err != nil { + return nil, cleanup, fmt.Errorf("failed to get memfile: %w", err) + } + + memfileSize, err := memfile.Size() + if err != nil { + return nil, cleanup, fmt.Errorf("failed to get memfile size: %w", err) + } + + // / ==== END of resources initialization ==== + rootfsPath, err := rootfsProvider.Path() + if err != nil { + return nil, cleanup, fmt.Errorf("failed to get rootfs path: %w", err) + } + ips := <-ipsCh + if ips.err != nil { + return nil, cleanup, fmt.Errorf("failed to get network slot: %w", err) + } + fcHandle, err := fc.NewProcess( + childCtx, + tracer, + ips.slot, + sandboxFiles, + rootfsPath, + config.BaseTemplateId, + config.BuildId, + ) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to init FC: %w", err) + } + + telemetry.ReportEvent(childCtx, "created fc client") + + err = fcHandle.Create( + childCtx, + tracer, + config.SandboxId, + config.TemplateId, + config.TeamId, + config.Vcpu, + config.RamMb, + config.HugePages, + processOptions, + ) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to create FC: %w", err) + } + telemetry.ReportEvent(childCtx, "created fc process") + + resources := &Resources{ + Slot: ips.slot, + rootfs: rootfsProvider, + memory: uffd.NewNoopMemory(memfileSize, memfile.BlockSize()), + uffdExit: make(chan error, 1), + } + + metadata := &Metadata{ + Config: config, + + StartedAt: time.Now(), + EndAt: time.Now().Add(sandboxTimeout), + } + + sbx := &Sandbox{ + Resources: resources, + Metadata: metadata, + + template: template, + files: sandboxFiles, + process: fcHandle, + + cleanup: cleanup, + } + + checks, err := NewChecks(ctx, tracer, sbx, false) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to create health check: %w", err) + } + sbx.Checks = checks + + cleanup.AddPriority(func(ctx context.Context) error { + return sbx.Close(ctx, tracer) + }) + + return sbx, cleanup, nil } -// Run cleanup functions for the already initialized resources if there is any error or after you are done with 
the started sandbox. -func NewSandbox( +// ResumeSandbox resumes the sandbox from already saved template or snapshot. +// IMPORTANT: You have to run cleanup functions for the already initialized resources even if there is any error, +// or after you are done with the started sandbox. +func ResumeSandbox( ctx context.Context, tracer trace.Tracer, - dns *dns.DNS, networkPool *network.Pool, templateCache *template.Cache, config *orchestrator.SandboxConfig, traceID string, startedAt time.Time, endAt time.Time, - logger *logs.SandboxLogger, - isSnapshot bool, baseTemplateID string, + devicePool *nbd.DevicePool, + allowInternet, + useClickhouseMetrics bool, ) (*Sandbox, *Cleanup, error) { childCtx, childSpan := tracer.Start(ctx, "new-sandbox") defer childSpan.End() @@ -81,34 +252,19 @@ func NewSandbox( config.BuildId, config.KernelVersion, config.FirecrackerVersion, - config.HugePages, - isSnapshot, ) if err != nil { return nil, cleanup, fmt.Errorf("failed to get template snapshot data: %w", err) } - networkCtx, networkSpan := tracer.Start(childCtx, "get-network-slot") - defer networkSpan.End() - - ips, err := networkPool.Get(networkCtx) - if err != nil { - return nil, cleanup, fmt.Errorf("failed to get network slot: %w", err) - } - - cleanup.Add(func() error { - returnErr := networkPool.Return(ips) - if returnErr != nil { - return fmt.Errorf("failed to return network slot: %w", returnErr) - } - - return nil - }) - networkSpan.End() + ipsCh := getNetworkSlotAsync(childCtx, tracer, networkPool, cleanup, allowInternet) + defer func() { + // Ensure the slot is received from chan so the slot is cleaned up properly in cleanup + <-ipsCh + }() sandboxFiles := t.Files().NewSandboxFiles(config.SandboxId) - - cleanup.Add(func() error { + cleanup.Add(func(ctx context.Context) error { filesErr := cleanupFiles(sandboxFiles) if filesErr != nil { return fmt.Errorf("failed to cleanup files: %w", filesErr) @@ -117,33 +273,27 @@ func NewSandbox( return nil }) - _, overlaySpan := 
tracer.Start(childCtx, "create-rootfs-overlay") - defer overlaySpan.End() - readonlyRootfs, err := t.Rootfs() if err != nil { return nil, cleanup, fmt.Errorf("failed to get rootfs: %w", err) } - rootfsOverlay, err := rootfs.NewCowDevice( + rootfsOverlay, err := createRootfsOverlay( + childCtx, + tracer, + devicePool, + cleanup, readonlyRootfs, sandboxFiles.SandboxCacheRootfsPath(), - sandboxFiles.RootfsBlockSize(), ) if err != nil { - return nil, cleanup, fmt.Errorf("failed to create overlay file: %w", err) + return nil, cleanup, fmt.Errorf("failed to create rootfs overlay: %w", err) } - cleanup.Add(func() error { - rootfsOverlay.Close() - - return nil - }) - go func() { runErr := rootfsOverlay.Start(childCtx) if runErr != nil { - fmt.Fprintf(os.Stderr, "[sandbox %s]: rootfs overlay error: %v\n", config.SandboxId, runErr) + zap.L().Error("rootfs overlay error", zap.Error(runErr)) } }() @@ -151,167 +301,149 @@ func NewSandbox( if err != nil { return nil, cleanup, fmt.Errorf("failed to get memfile: %w", err) } - overlaySpan.End() - fcUffd, uffdErr := uffd.New(memfile, sandboxFiles.SandboxUffdSocketPath(), sandboxFiles.MemfilePageSize()) - if uffdErr != nil { - return nil, cleanup, fmt.Errorf("failed to create uffd: %w", uffdErr) - } + fcUffdPath := sandboxFiles.SandboxUffdSocketPath() - uffdStartErr := fcUffd.Start(config.SandboxId) - if uffdStartErr != nil { - return nil, cleanup, fmt.Errorf("failed to start uffd: %w", uffdStartErr) + fcUffd, err := serveMemory( + childCtx, + tracer, + cleanup, + memfile, + fcUffdPath, + config.SandboxId, + ) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to serve memory: %w", err) } - cleanup.Add(func() error { - stopErr := fcUffd.Stop() - if stopErr != nil { - return fmt.Errorf("failed to stop uffd: %w", stopErr) - } - - return nil - }) - - uffdExit := make(chan error, 1) - - uffdStartCtx, cancelUffdStartCtx := context.WithCancelCause(childCtx) + uffdStartCtx, cancelUffdStartCtx := context.WithCancelCause(ctx) defer 
cancelUffdStartCtx(fmt.Errorf("uffd finished starting")) + uffdExit := make(chan error, 1) go func() { - uffdWaitErr := <-fcUffd.Exit + uffdWaitErr := <-fcUffd.Exit() uffdExit <- uffdWaitErr cancelUffdStartCtx(fmt.Errorf("uffd process exited: %w", errors.Join(uffdWaitErr, context.Cause(uffdStartCtx)))) }() + // / ==== END of resources initialization ==== + rootfsPath, err := rootfsOverlay.Path() + if err != nil { + return nil, cleanup, fmt.Errorf("failed to get rootfs path: %w", err) + } + ips := <-ipsCh + if ips.err != nil { + return nil, cleanup, fmt.Errorf("failed to get network slot: %w", err) + } + fcHandle, fcErr := fc.NewProcess( + uffdStartCtx, + tracer, + ips.slot, + sandboxFiles, + rootfsPath, + baseTemplateID, + readonlyRootfs.Header().Metadata.BaseBuildId.String(), + ) + if fcErr != nil { + return nil, cleanup, fmt.Errorf("failed to create FC: %w", fcErr) + } + // todo: check if kernel, firecracker, and envd versions exist snapfile, err := t.Snapfile() if err != nil { return nil, cleanup, fmt.Errorf("failed to get snapfile: %w", err) } - - fcHandle, fcErr := fc.NewProcess( + fcStartErr := fcHandle.Resume( uffdStartCtx, tracer, - ips, - sandboxFiles, &fc.MmdsMetadata{ SandboxId: config.SandboxId, TemplateId: config.TemplateId, - LogsCollectorAddress: logs.CollectorPublicIP, + LogsCollectorAddress: os.Getenv("LOGS_COLLECTOR_PUBLIC_IP"), TraceId: traceID, TeamId: config.TeamId, }, + fcUffdPath, snapfile, - rootfsOverlay, - fcUffd.Ready, - baseTemplateID, + fcUffd.Ready(), ) - if fcErr != nil { - return nil, cleanup, fmt.Errorf("failed to create FC: %w", fcErr) - } - - internalLogger := logger.GetInternalLogger() - fcStartErr := fcHandle.Start(uffdStartCtx, tracer, internalLogger) if fcStartErr != nil { return nil, cleanup, fmt.Errorf("failed to start FC: %w", fcStartErr) } telemetry.ReportEvent(childCtx, "initialized FC") - healthcheckCtx := utils.NewLockableCancelableContext(context.Background()) + resources := &Resources{ + Slot: ips.slot, + rootfs: 
rootfsOverlay, + memory: fcUffd, + uffdExit: uffdExit, + } - sbx := &Sandbox{ - uffdExit: uffdExit, - files: sandboxFiles, - Slot: ips, - template: t, - process: fcHandle, - uffd: fcUffd, - Config: config, - StartedAt: startedAt, - EndAt: endAt, - rootfs: rootfsOverlay, - Logger: logger, - cleanup: cleanup, - healthcheckCtx: healthcheckCtx, - } - - cleanup.AddPriority(func() error { - var errs []error - - fcStopErr := fcHandle.Stop() - if fcStopErr != nil { - errs = append(errs, fmt.Errorf("failed to stop FC: %w", fcStopErr)) - } + metadata := &Metadata{ + Config: config, - uffdStopErr := fcUffd.Stop() - if uffdStopErr != nil { - errs = append(errs, fmt.Errorf("failed to stop uffd: %w", uffdStopErr)) - } + StartedAt: startedAt, + EndAt: endAt, + } - healthcheckCtx.Lock() - healthcheckCtx.Cancel() - healthcheckCtx.Unlock() + sbx := &Sandbox{ + Resources: resources, + Metadata: metadata, - return errors.Join(errs...) - }) + template: t, + files: sandboxFiles, + process: fcHandle, - // Ensure the syncing takes at most 10 seconds. - syncCtx, syncCancel := context.WithTimeout(childCtx, 10*time.Second) - defer syncCancel() - - // Sync envds. 
- if semver.Compare(fmt.Sprintf("v%s", config.EnvdVersion), "v0.1.1") >= 0 { - initErr := sbx.initEnvd(syncCtx, tracer, config.EnvVars) - if initErr != nil { - return nil, cleanup, fmt.Errorf("failed to init new envd: %w", initErr) - } else { - telemetry.ReportEvent(childCtx, fmt.Sprintf("[sandbox %s]: initialized new envd", config.SandboxId)) - } - } else { - syncErr := sbx.syncOldEnvd(syncCtx) - if syncErr != nil { - telemetry.ReportError(childCtx, fmt.Errorf("failed to sync old envd: %w", syncErr)) - } else { - telemetry.ReportEvent(childCtx, fmt.Sprintf("[sandbox %s]: synced old envd", config.SandboxId)) - } + cleanup: cleanup, } - sbx.StartedAt = time.Now() - - dns.Add(config.SandboxId, ips.HostIP()) - - telemetry.ReportEvent(childCtx, "added DNS record", attribute.String("ip", ips.HostIP()), attribute.String("hostname", config.SandboxId)) + // Part of the sandbox as we need to stop Checks before pausing the sandbox + // This is to prevent race condition of reporting unhealthy sandbox + checks, err := NewChecks(ctx, tracer, sbx, useClickhouseMetrics) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to create health check: %w", err) + } - cleanup.Add(func() error { - dns.Remove(config.SandboxId, ips.HostIP()) + sbx.Checks = checks - return nil + cleanup.AddPriority(func(ctx context.Context) error { + return sbx.Close(ctx, tracer) }) - go sbx.logHeathAndUsage(healthcheckCtx) + err = sbx.WaitForEnvd( + ctx, + tracer, + defaultEnvdTimeout, + ) + if err != nil { + return nil, cleanup, fmt.Errorf("failed to wait for sandbox start: %w", err) + } + + go sbx.Checks.Start() return sbx, cleanup, nil } -func (s *Sandbox) Wait() error { +func (s *Sandbox) Wait(ctx context.Context) error { select { case fcErr := <-s.process.Exit: - stopErr := s.Stop() + stopErr := s.Stop(ctx) uffdErr := <-s.uffdExit return errors.Join(fcErr, stopErr, uffdErr) case uffdErr := <-s.uffdExit: - stopErr := s.Stop() + stopErr := s.Stop(ctx) fcErr := <-s.process.Exit return 
errors.Join(uffdErr, stopErr, fcErr) } } -func (s *Sandbox) Stop() error { - err := s.cleanup.Run() +// Stop starts the cleanup process for the sandbox. +func (s *Sandbox) Stop(ctx context.Context) error { + err := s.cleanup.Run(ctx) if err != nil { return fmt.Errorf("failed to stop sandbox: %w", err) } @@ -319,218 +451,440 @@ func (s *Sandbox) Stop() error { return nil } -func (s *Sandbox) Snapshot( +// Close cleans up the sandbox and stops all resources. +func (s *Sandbox) Close(ctx context.Context, tracer trace.Tracer) error { + _, span := tracer.Start(ctx, "sandbox-close") + defer span.End() + + var errs []error + + // Stop the health checks before stopping the sandbox + s.Checks.Stop() + + fcStopErr := s.process.Stop() + if fcStopErr != nil { + errs = append(errs, fmt.Errorf("failed to stop FC: %w", fcStopErr)) + } + + uffdStopErr := s.Resources.memory.Stop() + if uffdStopErr != nil { + errs = append(errs, fmt.Errorf("failed to stop uffd: %w", uffdStopErr)) + } + + return errors.Join(errs...) 
+} + +func (s *Sandbox) Pause( ctx context.Context, tracer trace.Tracer, snapshotTemplateFiles *storage.TemplateCacheFiles, - releaseLock func(), ) (*Snapshot, error) { - ctx, childSpan := tracer.Start(ctx, "sandbox-snapshot") + childCtx, childSpan := tracer.Start(ctx, "sandbox-snapshot") defer childSpan.End() - buildId, err := uuid.Parse(snapshotTemplateFiles.BuildId) + buildID, err := uuid.Parse(snapshotTemplateFiles.BuildId) if err != nil { return nil, fmt.Errorf("failed to parse build id: %w", err) } - // MEMFILE & SNAPFILE - originalMemfile, err := s.template.Memfile() - if err != nil { - return nil, fmt.Errorf("failed to get original memfile: %w", err) - } + // Stop the health check before pausing the VM + s.Checks.Stop() - memfileMetadata := &header.Metadata{ - Version: 1, - Generation: originalMemfile.Header().Metadata.Generation + 1, - BlockSize: originalMemfile.Header().Metadata.BlockSize, - Size: originalMemfile.Header().Metadata.Size, - BuildId: buildId, - BaseBuildId: originalMemfile.Header().Metadata.BaseBuildId, + if err := s.process.Pause(childCtx, tracer); err != nil { + return nil, fmt.Errorf("failed to pause VM: %w", err) } - s.healthcheckCtx.Lock() - s.healthcheckCtx.Cancel() - s.healthcheckCtx.Unlock() - - err = s.process.Pause(ctx, tracer) - if err != nil { - return nil, fmt.Errorf("error pausing vm: %w", err) + if err := s.memory.Disable(); err != nil { + return nil, fmt.Errorf("failed to disable uffd: %w", err) } - err = s.uffd.Disable() + // Snapfile is not closed as it's returned and cached for later use (like resume) + snapfile := template.NewLocalFileLink(snapshotTemplateFiles.CacheSnapfilePath()) + // Memfile is also closed on diff creation processing + /* The process of snapshotting memory is as follows: + 1. Pause FC via API + 2. Snapshot FC via API—memory dump to “file on disk” that is actually tmpfs, because it is too slow + 3. Create the diff - copy the diff pages from tmpfs to normal disk file + 4. Delete tmpfs file + 5. 
Unlock so another snapshot can use tmpfs space + */ + memfile, err := storage.AcquireTmpMemfile(childCtx, buildID.String()) if err != nil { - return nil, fmt.Errorf("failed to disable uffd: %w", err) + return nil, fmt.Errorf("failed to acquire memfile snapshot: %w", err) } - - defer os.RemoveAll(snapshotTemplateFiles.CacheMemfileFullSnapshotPath()) + // Close the file even if an error occurs + defer memfile.Close() err = s.process.CreateSnapshot( - ctx, + childCtx, tracer, - snapshotTemplateFiles.CacheSnapfilePath(), - snapshotTemplateFiles.CacheMemfileFullSnapshotPath(), + snapfile.Path(), + memfile.Path(), ) if err != nil { return nil, fmt.Errorf("error creating snapshot: %w", err) } - memfileDirtyPages := s.uffd.Dirty() - - sourceFile, err := os.Open(snapshotTemplateFiles.CacheMemfileFullSnapshotPath()) + // Gather data for postprocessing + originalMemfile, err := s.template.Memfile() + if err != nil { + return nil, fmt.Errorf("failed to get original memfile: %w", err) + } + originalRootfs, err := s.template.Rootfs() if err != nil { - return nil, fmt.Errorf("failed to open memfile: %w", err) + return nil, fmt.Errorf("failed to get original rootfs: %w", err) } - memfileDiffFile, err := build.NewLocalDiffFile( - buildId.String(), - build.Memfile, + // Start POSTPROCESSING + memfileDiff, memfileDiffHeader, err := pauseProcessMemory( + childCtx, + tracer, + buildID, + originalMemfile.Header(), + &MemoryDiffCreator{ + tracer: tracer, + memfile: memfile, + dirtyPages: s.memory.Dirty(), + blockSize: originalMemfile.BlockSize(), + doneHook: func(ctx context.Context) error { + return memfile.Close() + }, + }, ) if err != nil { - return nil, fmt.Errorf("failed to create memfile diff file: %w", err) + return nil, fmt.Errorf("error while post processing: %w", err) } - err = header.CreateDiff(sourceFile, s.files.MemfilePageSize(), memfileDirtyPages, memfileDiffFile) + rootfsDiff, rootfsDiffHeader, err := pauseProcessRootfs( + childCtx, + tracer, + buildID, + 
originalRootfs.Header(), + &RootfsDiffCreator{ + rootfs: s.rootfs, + stopHook: s.Stop, + }, + ) if err != nil { - return nil, fmt.Errorf("failed to create memfile diff: %w", err) + return nil, fmt.Errorf("error while post processing: %w", err) } - telemetry.ReportEvent(ctx, "created memfile diff") + return &Snapshot{ + Snapfile: snapfile, + MemfileDiff: memfileDiff, + MemfileDiffHeader: memfileDiffHeader, + RootfsDiff: rootfsDiff, + RootfsDiffHeader: rootfsDiffHeader, + }, nil +} - os.RemoveAll(snapshotTemplateFiles.CacheMemfileFullSnapshotPath()) +type Snapshot struct { + MemfileDiff build.Diff + MemfileDiffHeader *header.Header + RootfsDiff build.Diff + RootfsDiffHeader *header.Header + Snapfile *template.LocalFileLink +} - releaseLock() +func (s *Snapshot) Close(_ context.Context) error { + var errs []error - memfileMapping := header.CreateMapping( - memfileMetadata, - &buildId, - memfileDirtyPages, - ) + if err := s.MemfileDiff.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close memfile diff: %w", err)) + } - telemetry.ReportEvent(ctx, "created memfile mapping") + if err := s.RootfsDiff.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close rootfs diff: %w", err)) + } - memfileMappings := header.MergeMappings( - originalMemfile.Header().Mapping, - memfileMapping, - ) + if err := s.Snapfile.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close snapfile: %w", err)) + } - telemetry.ReportEvent(ctx, "merged memfile mappings") + return errors.Join(errs...) 
+} - snapfile, err := template.NewLocalFile(snapshotTemplateFiles.CacheSnapfilePath()) - if err != nil { - return nil, fmt.Errorf("failed to create local snapfile: %w", err) - } +func pauseProcessMemory( + ctx context.Context, + tracer trace.Tracer, + buildId uuid.UUID, + originalHeader *header.Header, + diffCreator DiffCreator, +) (build.Diff, *header.Header, error) { + ctx, childSpan := tracer.Start(ctx, "process-memory") + defer childSpan.End() - // ROOTFS - originalRootfs, err := s.template.Rootfs() + memfileDiffFile, err := build.NewLocalDiffFile( + build.DefaultCachePath, + buildId.String(), + build.Memfile, + ) if err != nil { - return nil, fmt.Errorf("failed to get original rootfs: %w", err) - } - - rootfsMetadata := &header.Metadata{ - Version: 1, - Generation: originalRootfs.Header().Metadata.Generation + 1, - BlockSize: originalRootfs.Header().Metadata.BlockSize, - Size: originalRootfs.Header().Metadata.Size, - BuildId: buildId, - BaseBuildId: originalRootfs.Header().Metadata.BaseBuildId, + return nil, nil, fmt.Errorf("failed to create memfile diff file: %w", err) } - nbdPath, err := s.rootfs.Path() + m, err := diffCreator.process(ctx, memfileDiffFile) if err != nil { - return nil, fmt.Errorf("failed to get rootfs path: %w", err) + return nil, nil, fmt.Errorf("error creating diff: %w", err) } + telemetry.ReportEvent(ctx, "created diff") - // Flush the data to the operating system's buffer - file, err := os.Open(nbdPath) + memfileMapping, err := m.CreateMapping(ctx, buildId) if err != nil { - return nil, fmt.Errorf("failed to open rootfs path: %w", err) + return nil, nil, fmt.Errorf("failed to create memfile mapping: %w", err) } - if err := unix.IoctlSetInt(int(file.Fd()), unix.BLKFLSBUF, 0); err != nil { - return nil, fmt.Errorf("ioctl BLKFLSBUF failed: %w", err) - } + memfileMappings := header.MergeMappings( + originalHeader.Mapping, + memfileMapping, + ) + // TODO: We can run normalization only when empty mappings are not empty for this snapshot + 
memfileMappings = header.NormalizeMappings(memfileMappings) + telemetry.ReportEvent(ctx, "merged memfile mappings") - err = syscall.Fsync(int(file.Fd())) + memfileDiff, err := memfileDiffFile.CloseToDiff(int64(originalHeader.Metadata.BlockSize)) if err != nil { - return nil, fmt.Errorf("failed to fsync rootfs path: %w", err) + return nil, nil, fmt.Errorf("failed to convert memfile diff file to local diff: %w", err) } - err = file.Sync() - if err != nil { - return nil, fmt.Errorf("failed to sync rootfs path: %w", err) - } + telemetry.ReportEvent(ctx, "converted memfile diff file to local diff") - telemetry.ReportEvent(ctx, "synced rootfs") + memfileMetadata := originalHeader.Metadata.NextGeneration(buildId) - rootfsDiffFile, err := build.NewLocalDiffFile(buildId.String(), build.Rootfs) + telemetry.SetAttributes(ctx, + attribute.Int64("snapshot.memfile.header.mappings.length", int64(len(memfileMappings))), + attribute.Int64("snapshot.memfile.diff.size", int64(m.Dirty.Count()*uint(originalHeader.Metadata.BlockSize))), + attribute.Int64("snapshot.memfile.mapped_size", int64(memfileMetadata.Size)), + attribute.Int64("snapshot.memfile.block_size", int64(memfileMetadata.BlockSize)), + attribute.Int64("snapshot.metadata.version", int64(memfileMetadata.Version)), + attribute.Int64("snapshot.metadata.generation", int64(memfileMetadata.Generation)), + attribute.String("snapshot.metadata.build_id", memfileMetadata.BuildId.String()), + attribute.String("snapshot.metadata.base_build_id", memfileMetadata.BaseBuildId.String()), + ) + + return memfileDiff, header.NewHeader(memfileMetadata, memfileMappings), nil +} + +func pauseProcessRootfs( + ctx context.Context, + tracer trace.Tracer, + buildId uuid.UUID, + originalHeader *header.Header, + diffCreator DiffCreator, +) (build.Diff, *header.Header, error) { + ctx, childSpan := tracer.Start(ctx, "process-rootfs") + defer childSpan.End() + + rootfsDiffFile, err := build.NewLocalDiffFile(build.DefaultCachePath, buildId.String(), 
build.Rootfs) if err != nil { - return nil, fmt.Errorf("failed to create rootfs diff: %w", err) + return nil, nil, fmt.Errorf("failed to create rootfs diff: %w", err) } - rootfsDirtyBlocks, err := s.rootfs.Export(ctx, rootfsDiffFile, s.Stop) + rootfsDiffMetadata, err := diffCreator.process(ctx, rootfsDiffFile) if err != nil { - return nil, fmt.Errorf("failed to export rootfs: %w", err) + return nil, nil, fmt.Errorf("error creating diff: %w", err) } telemetry.ReportEvent(ctx, "exported rootfs") - - rootfsMapping := header.CreateMapping( - rootfsMetadata, - &buildId, - rootfsDirtyBlocks, - ) - - telemetry.ReportEvent(ctx, "created rootfs mapping") + rootfsMapping, err := rootfsDiffMetadata.CreateMapping(ctx, buildId) + if err != nil { + return nil, nil, fmt.Errorf("failed to create rootfs diff: %w", err) + } rootfsMappings := header.MergeMappings( - originalRootfs.Header().Mapping, + originalHeader.Mapping, rootfsMapping, ) - + // TODO: We can run normalization only when empty mappings are not empty for this snapshot + rootfsMappings = header.NormalizeMappings(rootfsMappings) telemetry.ReportEvent(ctx, "merged rootfs mappings") - rootfsDiff, err := rootfsDiffFile.ToDiff(int64(originalRootfs.Header().Metadata.BlockSize)) + rootfsDiff, err := rootfsDiffFile.CloseToDiff(int64(originalHeader.Metadata.BlockSize)) if err != nil { - return nil, fmt.Errorf("failed to convert rootfs diff file to local diff: %w", err) + return nil, nil, fmt.Errorf("failed to convert rootfs diff file to local diff: %w", err) } - telemetry.ReportEvent(ctx, "converted rootfs diff file to local diff") - memfileDiff, err := memfileDiffFile.ToDiff(int64(originalMemfile.Header().Metadata.BlockSize)) - if err != nil { - return nil, fmt.Errorf("failed to convert memfile diff file to local diff: %w", err) - } - - telemetry.ReportEvent(ctx, "converted memfile diff file to local diff") + rootfsMetadata := originalHeader.Metadata.NextGeneration(buildId) telemetry.SetAttributes(ctx, - 
attribute.Int64("snapshot.memfile.header.mappings.length", int64(len(memfileMappings))), attribute.Int64("snapshot.rootfs.header.mappings.length", int64(len(rootfsMappings))), - attribute.Int64("snapshot.memfile.diff.size", int64(memfileDirtyPages.Count()*uint(originalMemfile.Header().Metadata.BlockSize))), - attribute.Int64("snapshot.memfile.mapped_size", int64(memfileMetadata.Size)), - attribute.Int64("snapshot.memfile.block_size", int64(memfileMetadata.BlockSize)), - attribute.Int64("snapshot.rootfs.diff.size", int64(rootfsDirtyBlocks.Count()*uint(originalRootfs.Header().Metadata.BlockSize))), + attribute.Int64("snapshot.rootfs.diff.size", int64(rootfsDiffMetadata.Dirty.Count()*uint(originalHeader.Metadata.BlockSize))), attribute.Int64("snapshot.rootfs.mapped_size", int64(rootfsMetadata.Size)), attribute.Int64("snapshot.rootfs.block_size", int64(rootfsMetadata.BlockSize)), - attribute.Int64("snapshot.metadata.version", int64(memfileMetadata.Version)), - attribute.Int64("snapshot.metadata.generation", int64(memfileMetadata.Generation)), - attribute.String("snapshot.metadata.build_id", memfileMetadata.BuildId.String()), - attribute.String("snapshot.metadata.base_build_id", memfileMetadata.BaseBuildId.String()), ) - return &Snapshot{ - Snapfile: snapfile, - MemfileDiff: memfileDiff, - MemfileDiffHeader: header.NewHeader(memfileMetadata, memfileMappings), - RootfsDiff: rootfsDiff, - RootfsDiffHeader: header.NewHeader(rootfsMetadata, rootfsMappings), - }, nil + return rootfsDiff, header.NewHeader(rootfsMetadata, rootfsMappings), nil } -type Snapshot struct { - MemfileDiff build.Diff - MemfileDiffHeader *header.Header - RootfsDiff build.Diff - RootfsDiffHeader *header.Header - Snapfile *template.LocalFile +func getNetworkSlotAsync( + ctx context.Context, + tracer trace.Tracer, + networkPool *network.Pool, + cleanup *Cleanup, + allowInternet bool, +) chan networkSlotRes { + networkCtx, networkSpan := tracer.Start(ctx, "get-network-slot") + defer networkSpan.End() + + r 
:= make(chan networkSlotRes, 1) + + go func() { + defer close(r) + + ips, err := networkPool.Get(networkCtx, tracer, allowInternet) + if err != nil { + r <- networkSlotRes{nil, fmt.Errorf("failed to get network slot: %w", err)} + return + } + + cleanup.Add(func(ctx context.Context) error { + _, span := tracer.Start(ctx, "network-slot-clean") + defer span.End() + + // We can run this cleanup asynchronously, as it is not important for the sandbox lifecycle + go func() { + returnErr := networkPool.Return(context.Background(), tracer, ips) + if returnErr != nil { + zap.L().Error("failed to return network slot", zap.Error(returnErr)) + } + }() + + return nil + }) + + r <- networkSlotRes{ips, nil} + }() + + return r +} + +func createRootfsOverlay( + ctx context.Context, + tracer trace.Tracer, + devicePool *nbd.DevicePool, + cleanup *Cleanup, + readonlyRootfs block.ReadonlyDevice, + targetCachePath string, +) (rootfs.Provider, error) { + _, overlaySpan := tracer.Start(ctx, "create-rootfs-overlay") + defer overlaySpan.End() + + rootfsOverlay, err := rootfs.NewNBDProvider( + tracer, + readonlyRootfs, + targetCachePath, + devicePool, + ) + if err != nil { + return nil, fmt.Errorf("failed to create overlay file: %w", err) + } + + cleanup.Add(func(ctx context.Context) error { + childCtx, span := tracer.Start(ctx, "rootfs-overlay-close") + defer span.End() + + if rootfsOverlayErr := rootfsOverlay.Close(childCtx); rootfsOverlayErr != nil { + return fmt.Errorf("failed to close overlay file: %w", rootfsOverlayErr) + } + + return nil + }) + + return rootfsOverlay, nil +} + +func serveMemory( + ctx context.Context, + tracer trace.Tracer, + cleanup *Cleanup, + memfile block.ReadonlyDevice, + socketPath string, + sandboxID string, +) (uffd.MemoryBackend, error) { + fcUffd, uffdErr := uffd.New(memfile, socketPath, memfile.BlockSize()) + if uffdErr != nil { + return nil, fmt.Errorf("failed to create uffd: %w", uffdErr) + } + + uffdStartErr := fcUffd.Start(sandboxID) + if uffdStartErr != 
nil { + return nil, fmt.Errorf("failed to start uffd: %w", uffdStartErr) + } + + cleanup.Add(func(ctx context.Context) error { + _, span := tracer.Start(ctx, "uffd-stop") + defer span.End() + + stopErr := fcUffd.Stop() + if stopErr != nil { + return fmt.Errorf("failed to stop uffd: %w", stopErr) + } + + return nil + }) + + return fcUffd, nil +} + +func (s *Sandbox) WaitForExit( + ctx context.Context, + tracer trace.Tracer, +) error { + ctx, childSpan := tracer.Start(ctx, "sandbox-wait-for-exit") + defer childSpan.End() + + timeout := time.Until(s.EndAt) + + select { + case <-time.After(timeout): + return fmt.Errorf("waiting for exit took too long") + case <-ctx.Done(): + return nil + case err := <-s.process.Exit: + if err == nil { + return nil + } + return fmt.Errorf("fc process exited prematurely: %w", err) + } +} + +func (s *Sandbox) WaitForEnvd( + ctx context.Context, + tracer trace.Tracer, + timeout time.Duration, +) (e error) { + ctx, childSpan := tracer.Start(ctx, "sandbox-wait-for-start") + defer childSpan.End() + + defer func() { + if e != nil { + return + } + // Update the sandbox as started now + s.Metadata.StartedAt = time.Now() + }() + syncCtx, syncCancel := context.WithCancelCause(ctx) + defer syncCancel(nil) + + go func() { + select { + // Ensure the syncing takes at most timeout seconds. 
+ case <-time.After(timeout): + syncCancel(fmt.Errorf("syncing took too long")) + case <-syncCtx.Done(): + return + case err := <-s.process.Exit: + syncCancel(fmt.Errorf("fc process exited prematurely: %w", err)) + } + }() + + initErr := s.initEnvd(syncCtx, tracer, s.Metadata.Config.EnvVars, s.Metadata.Config.EnvdAccessToken) + if initErr != nil { + return fmt.Errorf("failed to init new envd: %w", initErr) + } else { + telemetry.ReportEvent(syncCtx, fmt.Sprintf("[sandbox %s]: initialized new envd", s.Metadata.Config.SandboxId)) + } + + return nil } diff --git a/packages/orchestrator/internal/sandbox/template/cache.go b/packages/orchestrator/internal/sandbox/template/cache.go index 2ec2375..ac8b171 100644 --- a/packages/orchestrator/internal/sandbox/template/cache.go +++ b/packages/orchestrator/internal/sandbox/template/cache.go @@ -3,25 +3,34 @@ package template import ( "context" "fmt" - "os" "time" "github.com/jellydator/ttlcache/v3" + "go.uber.org/zap" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" ) // How long to keep the template in the cache since the last access. // Should be longer than the maximum possible sandbox lifetime. -const templateExpiration = time.Hour * 25 +const ( + templateExpiration = time.Hour * 25 + + buildCacheTTL = time.Hour * 25 + buildCacheDelayEviction = time.Second * 60 + + // buildCacheMaxUsedPercentage the maximum percentage of the cache disk storage + // that can be used before the cache starts evicting items. 
+ buildCacheMaxUsedPercentage = 75.0 +) type Cache struct { - cache *ttlcache.Cache[string, Template] - bucket *s3.BucketHandle - ctx context.Context - buildStore *build.DiffStore + cache *ttlcache.Cache[string, Template] + persistence storage.StorageProvider + ctx context.Context + buildStore *build.DiffStore } func NewCache(ctx context.Context) (*Cache, error) { @@ -34,26 +43,33 @@ func NewCache(ctx context.Context) (*Cache, error) { err := template.Close() if err != nil { - fmt.Printf("[template data cache]: failed to cleanup template data for item %s: %v\n", item.Key(), err) + zap.L().Warn("failed to cleanup template data", zap.String("item_key", item.Key()), zap.Error(err)) } }) go cache.Start() - // Get the S3 bucket for templates - bucket := s3.GetTemplateBucket() - - // Create the build store - buildStore, err := build.NewDiffStore(bucket, ctx) + buildStore, err := build.NewDiffStore( + ctx, + build.DefaultCachePath, + buildCacheTTL, + buildCacheDelayEviction, + buildCacheMaxUsedPercentage, + ) if err != nil { return nil, fmt.Errorf("failed to create build store: %w", err) } + persistence, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get storage provider: %w", err) + } + return &Cache{ - bucket: bucket, - buildStore: buildStore, - cache: cache, - ctx: ctx, + persistence: persistence, + buildStore: buildStore, + cache: cache, + ctx: ctx, }, nil } @@ -61,44 +77,20 @@ func (c *Cache) Items() map[string]*ttlcache.Item[string, Template] { return c.cache.Items() } -// GetTemplate gets a template from the cache or creates a new one. -// In mock mode, it will return a mock template if the template doesn't exist in the cache. 
func (c *Cache) GetTemplate( templateId, buildId, kernelVersion, firecrackerVersion string, - hugePages bool, - isSnapshot bool, ) (Template, error) { - // Check if we're in mock mode - if os.Getenv("MOCK_SANDBOX") == "true" { - // Try to get the template from the cache first - cacheKey := fmt.Sprintf("%s-%s-%s-%s-%v-%v", templateId, buildId, kernelVersion, firecrackerVersion, hugePages, isSnapshot) - if item := c.cache.Get(cacheKey); item != nil { - return item.Value(), nil - } - - // If not in cache, create a mock template - mockTemplate := newMockTemplate(templateId, buildId, kernelVersion, firecrackerVersion, hugePages) - - // Add it to the cache - item := c.cache.Set(cacheKey, mockTemplate, templateExpiration) - - return item.Value(), nil - } - - // Normal flow for non-mock mode storageTemplate, err := newTemplateFromStorage( templateId, buildId, kernelVersion, firecrackerVersion, - hugePages, - isSnapshot, nil, nil, - c.bucket, + c.persistence, nil, ) if err != nil { @@ -123,31 +115,24 @@ func (c *Cache) AddSnapshot( buildId, kernelVersion, firecrackerVersion string, - hugePages bool, memfileHeader *header.Header, rootfsHeader *header.Header, - localSnapfile *LocalFile, + localSnapfile *LocalFileLink, memfileDiff build.Diff, rootfsDiff build.Diff, ) error { - // Check if we're in mock mode - if os.Getenv("MOCK_SANDBOX") == "true" { - // In mock mode, we don't need to do anything - return nil - } - switch memfileDiff.(type) { case *build.NoDiff: break default: - c.buildStore.Add(buildId, build.Memfile, memfileDiff) + c.buildStore.Add(memfileDiff) } switch rootfsDiff.(type) { case *build.NoDiff: break default: - c.buildStore.Add(buildId, build.Rootfs, rootfsDiff) + c.buildStore.Add(rootfsDiff) } storageTemplate, err := newTemplateFromStorage( @@ -155,11 +140,9 @@ func (c *Cache) AddSnapshot( buildId, kernelVersion, firecrackerVersion, - hugePages, - true, memfileHeader, rootfsHeader, - c.bucket, + c.persistence, localSnapfile, ) if err != nil { diff --git 
a/packages/orchestrator/internal/sandbox/template/header_helper.go b/packages/orchestrator/internal/sandbox/template/header_helper.go deleted file mode 100644 index 9961362..0000000 --- a/packages/orchestrator/internal/sandbox/template/header_helper.go +++ /dev/null @@ -1,42 +0,0 @@ -package template - -import ( - "fmt" - "io" - - "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" -) - -// Wrapper function to call the real header.Deserialize function that we don't have access to directly -// func deserializeHeader(ctx context.Context, obj StorageObject) (*header.Header, error) { -// reader, err := obj.Reader(ctx) -// if err != nil { -// return nil, fmt.Errorf("failed to get reader: %w", err) -// } -// defer reader.Close() - -// return NewHeaderFromReader(reader) -// } - -// NewHeaderFromReader is a wrapper that mimics the expected behavior of header.NewHeaderFromReader -// Since we don't have access to the actual function, we're implementing a minimal version here -func NewHeaderFromReader(reader io.Reader) (*header.Header, error) { - // In a real implementation, this would parse the serialized header format - // For now, we'll just return a minimal header to allow compilation to proceed - - // This is just a placeholder - the real function would deserialize from the reader - _, err := io.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed to read header data: %w", err) - } - - // For now we'll just create a dummy header to pass compilation - dummyMetadata := &header.Metadata{ - Version: 1, - Generation: 1, - Size: 1024, - BlockSize: 64, - } - - return header.NewHeader(dummyMetadata, nil), nil -} diff --git a/packages/orchestrator/internal/sandbox/template/local_file.go b/packages/orchestrator/internal/sandbox/template/local_file.go index e606b4a..782c019 100644 --- a/packages/orchestrator/internal/sandbox/template/local_file.go +++ b/packages/orchestrator/internal/sandbox/template/local_file.go @@ -4,22 +4,22 @@ import ( "os" ) -type 
LocalFile struct { +type LocalFileLink struct { path string } -func NewLocalFile( +func NewLocalFileLink( path string, -) (*LocalFile, error) { - return &LocalFile{ +) *LocalFileLink { + return &LocalFileLink{ path: path, - }, nil + } } -func (f *LocalFile) Path() string { +func (f *LocalFileLink) Path() string { return f.path } -func (f *LocalFile) Close() error { +func (f *LocalFileLink) Close() error { return os.RemoveAll(f.path) } diff --git a/packages/orchestrator/internal/sandbox/template/local_template.go b/packages/orchestrator/internal/sandbox/template/local_template.go new file mode 100644 index 0000000..2cdb2f4 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/template/local_template.go @@ -0,0 +1,55 @@ +package template + +import ( + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" +) + +type LocalTemplate struct { + files *storage.TemplateCacheFiles + + memfile block.ReadonlyDevice + rootfs block.ReadonlyDevice +} + +func NewLocalTemplate( + files *storage.TemplateCacheFiles, + rootfs block.ReadonlyDevice, + memfile block.ReadonlyDevice, +) *LocalTemplate { + return &LocalTemplate{ + files: files, + memfile: memfile, + rootfs: rootfs, + } +} + +func (t *LocalTemplate) Close() error { + return closeTemplate(t) +} + +func (t *LocalTemplate) Files() *storage.TemplateCacheFiles { + return t.files +} + +func (t *LocalTemplate) Memfile() (block.ReadonlyDevice, error) { + return t.memfile, nil +} + +func (t *LocalTemplate) Rootfs() (block.ReadonlyDevice, error) { + return t.rootfs, nil +} + +func (t *LocalTemplate) Snapfile() (File, error) { + return &NoopSnapfile{}, nil +} + +type NoopSnapfile struct{} + +func (n *NoopSnapfile) Close() error { + return nil +} + +func (n *NoopSnapfile) Path() string { + return "/dev/null" +} diff --git a/packages/orchestrator/internal/sandbox/template/mock_template.go b/packages/orchestrator/internal/sandbox/template/mock_template.go 
deleted file mode 100644 index 4695360..0000000 --- a/packages/orchestrator/internal/sandbox/template/mock_template.go +++ /dev/null @@ -1,105 +0,0 @@ -package template - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" -) - -// mockTemplate is a template implementation that doesn't require actual storage access. -// It's used in the mock-sandbox environment for testing. -type mockTemplate struct { - files *storage.TemplateCacheFiles -} - -// newMockTemplate creates a new mock template. -func newMockTemplate( - templateId, - buildId, - kernelVersion, - firecrackerVersion string, - hugePages bool, -) Template { - files, _ := storage.NewTemplateFiles( - templateId, - buildId, - kernelVersion, - firecrackerVersion, - hugePages, - ).NewTemplateCacheFiles() - - // Create the cache directory if it doesn't exist - os.MkdirAll(files.CacheDir(), os.ModePerm) - - return &mockTemplate{ - files: files, - } -} - -// Files returns the template cache files. -func (t *mockTemplate) Files() *storage.TemplateCacheFiles { - return t.files -} - -// Memfile returns a mock storage that doesn't actually read from disk. -func (t *mockTemplate) Memfile() (*Storage, error) { - // Create a mock header - metadata := &header.Metadata{ - Version: 1, - BlockSize: 4096, - Size: 1024 * 1024, // 1MB - Generation: 1, - } - h := header.NewHeader(metadata, nil) - - // Create a mock file - return &Storage{ - header: h, - source: build.NewFile(h, nil, build.Memfile), - }, nil -} - -// Rootfs returns a mock storage that doesn't actually read from disk. 
-func (t *mockTemplate) Rootfs() (*Storage, error) { - // Create a mock header - metadata := &header.Metadata{ - Version: 1, - BlockSize: 4096, - Size: 10 * 1024 * 1024, // 10MB - Generation: 1, - } - h := header.NewHeader(metadata, nil) - - // Create a mock file - return &Storage{ - header: h, - source: build.NewFile(h, nil, build.Rootfs), - }, nil -} - -// Snapfile returns a mock file that doesn't actually exist on disk. -func (t *mockTemplate) Snapfile() (File, error) { - // Create a mock file path - path := filepath.Join(t.files.CacheDir(), "mock-snapfile") - - // Create an empty file - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("failed to create mock snapfile: %w", err) - } - f.Close() - - return &LocalFile{ - path: path, - }, nil -} - -// Close cleans up any resources used by the mock template. -func (t *mockTemplate) Close() error { - // Nothing to clean up - return nil -} diff --git a/packages/orchestrator/internal/sandbox/template/storage.go b/packages/orchestrator/internal/sandbox/template/storage.go index 91f34b7..8d8dc4c 100644 --- a/packages/orchestrator/internal/sandbox/template/storage.go +++ b/packages/orchestrator/internal/sandbox/template/storage.go @@ -2,6 +2,7 @@ package template import ( "context" + "errors" "fmt" "github.com/google/uuid" @@ -9,7 +10,11 @@ import ( "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" +) + +const ( + oldMemfileHugePageSize = 2 << 20 // 2 MiB + oldRootfsBlockSize = 2 << 11 // 4 KiB ) type Storage struct { @@ -22,22 +27,35 @@ func NewStorage( store *build.DiffStore, buildId string, fileType build.DiffType, - blockSize int64, - isSnapshot bool, h *header.Header, - bucket *s3.BucketHandle, + persistence storage.StorageProvider, ) (*Storage, error) { - if isSnapshot && h == nil { - headerObject := 
s3.NewObject(ctx, bucket, buildId+"/"+string(fileType)+storage.HeaderSuffix) + if h == nil { + headerObjectPath := buildId + "/" + string(fileType) + storage.HeaderSuffix + headerObject, err := persistence.OpenObject(ctx, headerObjectPath) + if err != nil { + return nil, err + } diffHeader, err := header.Deserialize(headerObject) - if err != nil { + + // If we can't find the diff header in storage, we switch to templates without a headers + if err != nil && !errors.Is(err, storage.ErrorObjectNotExist) { return nil, fmt.Errorf("failed to deserialize header: %w", err) } - h = diffHeader - } else if h == nil { - object := s3.NewObject(ctx, bucket, buildId+"/"+string(fileType)) + if err == nil { + h = diffHeader + } + } + + // If we can't find the diff header in storage, we try to find the "old" style template without a header as a fallback. + if h == nil { + objectPath := buildId + "/" + string(fileType) + object, err := persistence.OpenObject(ctx, objectPath) + if err != nil { + return nil, err + } size, err := object.Size() if err != nil { @@ -49,19 +67,29 @@ func NewStorage( return nil, fmt.Errorf("failed to parse build id: %w", err) } - metadata := &header.Metadata{ - Version: 1, - BlockSize: uint64(blockSize), - Size: uint64(size), - Generation: 1, - BuildId: id, - BaseBuildId: id, + // TODO: This is a workaround for the old style template without a header. + // We don't know the block size of the old style template, so we set it manually. 
+ var blockSize uint64 + switch fileType { + case build.Memfile: + blockSize = oldMemfileHugePageSize + case build.Rootfs: + blockSize = oldRootfsBlockSize + default: + return nil, fmt.Errorf("unsupported file type: %s", fileType) } - h = header.NewHeader(metadata, nil) + h = header.NewHeader(&header.Metadata{ + BuildId: id, + BaseBuildId: id, + Size: uint64(size), + Version: 1, + BlockSize: blockSize, + Generation: 1, + }, nil) } - b := build.NewFile(h, store, fileType) + b := build.NewFile(h, store, fileType, persistence) return &Storage{ source: b, @@ -77,6 +105,10 @@ func (d *Storage) Size() (int64, error) { return int64(d.header.Metadata.Size), nil } +func (d *Storage) BlockSize() int64 { + return int64(d.header.Metadata.BlockSize) +} + func (d *Storage) Slice(off, length int64) ([]byte, error) { return d.source.Slice(off, length) } @@ -84,3 +116,7 @@ func (d *Storage) Slice(off, length int64) ([]byte, error) { func (d *Storage) Header() *header.Header { return d.header } + +func (d *Storage) Close() error { + return nil +} diff --git a/packages/orchestrator/internal/sandbox/template/storage_file.go b/packages/orchestrator/internal/sandbox/template/storage_file.go index afb8da2..45eff47 100644 --- a/packages/orchestrator/internal/sandbox/template/storage_file.go +++ b/packages/orchestrator/internal/sandbox/template/storage_file.go @@ -6,7 +6,7 @@ import ( "fmt" "os" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" ) type storageFile struct { @@ -15,8 +15,8 @@ type storageFile struct { func newStorageFile( ctx context.Context, - bucket *s3.BucketHandle, - bucketObjectPath string, + persistence storage.StorageProvider, + objectPath string, path string, ) (*storageFile, error) { f, err := os.Create(path) @@ -26,13 +26,15 @@ func newStorageFile( defer f.Close() - object := s3.NewObject(ctx, bucket, bucketObjectPath) + object, err := persistence.OpenObject(ctx, objectPath) + if err != nil { + 
return nil, err + } _, err = object.WriteTo(f) if err != nil { cleanupErr := os.Remove(path) - - return nil, fmt.Errorf("failed to write to file: %w", errors.Join(err, cleanupErr)) + return nil, fmt.Errorf("failed to write to file: %w", errors.Join(err, cleanupErr)) } return &storageFile{ @@ -45,5 +47,5 @@ func (f *storageFile) Path() string { } func (f *storageFile) Close() error { - return os.Remove(f.path) + return os.RemoveAll(f.path) } diff --git a/packages/orchestrator/internal/sandbox/template/storage_template.go b/packages/orchestrator/internal/sandbox/template/storage_template.go index c2b6fa2..eea9354 100644 --- a/packages/orchestrator/internal/sandbox/template/storage_template.go +++ b/packages/orchestrator/internal/sandbox/template/storage_template.go @@ -3,30 +3,27 @@ package template import ( "context" "fmt" - "os" "sync" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" "github.com/e2b-dev/infra/packages/shared/pkg/utils" ) type storageTemplate struct { files *storage.TemplateCacheFiles - memfile *utils.SetOnce[*Storage] - rootfs *utils.SetOnce[*Storage] + memfile *utils.SetOnce[block.ReadonlyDevice] + rootfs *utils.SetOnce[block.ReadonlyDevice] snapfile *utils.SetOnce[File] - isSnapshot bool - memfileHeader *header.Header rootfsHeader *header.Header - localSnapfile *LocalFile + localSnapfile *LocalFileLink - bucket *s3.BucketHandle + persistence storage.StorageProvider } func newTemplateFromStorage( @@ -34,19 +31,16 @@ func newTemplateFromStorage( buildId, kernelVersion, firecrackerVersion string, - hugePages bool, - isSnapshot bool, memfileHeader *header.Header, rootfsHeader *header.Header, - bucket *s3.BucketHandle, - localSnapfile *LocalFile, + persistence 
storage.StorageProvider, + localSnapfile *LocalFileLink, ) (*storageTemplate, error) { files, err := storage.NewTemplateFiles( templateId, buildId, kernelVersion, firecrackerVersion, - hugePages, ).NewTemplateCacheFiles() if err != nil { return nil, fmt.Errorf("failed to create template cache files: %w", err) @@ -55,28 +49,16 @@ func newTemplateFromStorage( return &storageTemplate{ files: files, localSnapfile: localSnapfile, - isSnapshot: isSnapshot, memfileHeader: memfileHeader, rootfsHeader: rootfsHeader, - bucket: bucket, - memfile: utils.NewSetOnce[*Storage](), - rootfs: utils.NewSetOnce[*Storage](), + persistence: persistence, + memfile: utils.NewSetOnce[block.ReadonlyDevice](), + rootfs: utils.NewSetOnce[block.ReadonlyDevice](), snapfile: utils.NewSetOnce[File](), }, nil } func (t *storageTemplate) Fetch(ctx context.Context, buildStore *build.DiffStore) { - err := os.MkdirAll(t.files.CacheDir(), os.ModePerm) - if err != nil { - errMsg := fmt.Errorf("failed to create directory %s: %w", t.files.CacheDir(), err) - - t.memfile.SetError(errMsg) - t.rootfs.SetError(errMsg) - t.snapfile.SetError(errMsg) - - return - } - var wg sync.WaitGroup wg.Add(1) @@ -88,7 +70,7 @@ func (t *storageTemplate) Fetch(ctx context.Context, buildStore *build.DiffStore snapfile, snapfileErr := newStorageFile( ctx, - t.bucket, + t.persistence, t.files.StorageSnapfilePath(), t.files.CacheSnapfilePath(), ) @@ -110,11 +92,10 @@ func (t *storageTemplate) Fetch(ctx context.Context, buildStore *build.DiffStore buildStore, t.files.BuildId, build.Memfile, - t.files.MemfilePageSize(), - t.isSnapshot, t.memfileHeader, - t.bucket, + t.persistence, ) + if memfileErr != nil { errMsg := fmt.Errorf("failed to create memfile storage: %w", memfileErr) @@ -133,10 +114,8 @@ func (t *storageTemplate) Fetch(ctx context.Context, buildStore *build.DiffStore buildStore, t.files.BuildId, build.Rootfs, - t.files.RootfsBlockSize(), - t.isSnapshot, t.rootfsHeader, - t.bucket, + t.persistence, ) if rootfsErr != nil 
{ errMsg := fmt.Errorf("failed to create rootfs storage: %w", rootfsErr) @@ -158,11 +137,11 @@ func (t *storageTemplate) Files() *storage.TemplateCacheFiles { return t.files } -func (t *storageTemplate) Memfile() (*Storage, error) { +func (t *storageTemplate) Memfile() (block.ReadonlyDevice, error) { return t.memfile.Wait() } -func (t *storageTemplate) Rootfs() (*Storage, error) { +func (t *storageTemplate) Rootfs() (block.ReadonlyDevice, error) { return t.rootfs.Wait() } diff --git a/packages/orchestrator/internal/sandbox/template/template.go b/packages/orchestrator/internal/sandbox/template/template.go index 62bfaca..bafe91d 100644 --- a/packages/orchestrator/internal/sandbox/template/template.go +++ b/packages/orchestrator/internal/sandbox/template/template.go @@ -2,25 +2,54 @@ package template import ( "errors" + "fmt" + "io" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" "github.com/e2b-dev/infra/packages/shared/pkg/storage" ) type Template interface { Files() *storage.TemplateCacheFiles - Memfile() (*Storage, error) - Rootfs() (*Storage, error) + Memfile() (block.ReadonlyDevice, error) + Rootfs() (block.ReadonlyDevice, error) Snapfile() (File, error) Close() error } -func closeTemplate(t Template) error { - var errs []error +func closeTemplate(t Template) (e error) { + closable := make([]io.Closer, 0) + + memfile, err := t.Memfile() + if err != nil { + e = errors.Join(e, err) + } else { + closable = append(closable, memfile) + } + + rootfs, err := t.Rootfs() + if err != nil { + e = errors.Join(e, err) + } else { + closable = append(closable, rootfs) + } snapfile, err := t.Snapfile() - if err == nil { - errs = append(errs, snapfile.Close()) + if err != nil { + e = errors.Join(e, err) + } else { + closable = append(closable, snapfile) + } + + for _, c := range closable { + if err := c.Close(); err != nil { + e = errors.Join(e, err) + } + } + + if e != nil { + return fmt.Errorf("error closing template: %w", e) } - return 
errors.Join(errs...) + return nil } diff --git a/packages/orchestrator/internal/sandbox/uffd/handler.go b/packages/orchestrator/internal/sandbox/uffd/handler.go index 4f55b28..136ebbd 100644 --- a/packages/orchestrator/internal/sandbox/uffd/handler.go +++ b/packages/orchestrator/internal/sandbox/uffd/handler.go @@ -10,9 +10,11 @@ import ( "syscall" "time" - "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" - "github.com/bits-and-blooms/bitset" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" ) const ( @@ -31,13 +33,13 @@ func (u *Uffd) TrackAndReturnNil() error { } type Uffd struct { - Exit chan error - Ready chan struct{} + exitCh chan error + readyCh chan struct{} exitReader *os.File exitWriter *os.File - Stop func() error + stopFn func() error lis *net.UnixListener @@ -65,13 +67,13 @@ func New(memfile block.ReadonlyDevice, socketPath string, blockSize int64) (*Uff } return &Uffd{ - Exit: make(chan error, 1), - Ready: make(chan struct{}, 1), + exitCh: make(chan error, 1), + readyCh: make(chan struct{}, 1), exitReader: pRead, exitWriter: pWrite, memfile: trackedMemfile, socketPath: socketPath, - Stop: sync.OnceValue(func() error { + stopFn: sync.OnceValue(func() error { _, writeErr := pWrite.Write([]byte{0}) if writeErr != nil { return fmt.Errorf("failed write to exit writer: %w", writeErr) @@ -101,10 +103,10 @@ func (u *Uffd) Start(sandboxId string) error { closeErr := u.lis.Close() writerErr := u.exitWriter.Close() - u.Exit <- errors.Join(handleErr, closeErr, writerErr) + u.exitCh <- errors.Join(handleErr, closeErr, writerErr) - close(u.Ready) - close(u.Exit) + close(u.readyCh) + close(u.exitCh) }() return nil @@ -174,16 +176,35 @@ func (u *Uffd) handle(sandboxId string) (err error) { defer func() { closeErr := syscall.Close(int(uffd)) if closeErr != nil { - fmt.Fprintf(os.Stderr, "[sandbox %s]: failed to close uffd at path %s: %v\n", 
sandboxId, u.socketPath, closeErr) + zap.L().Error("failed to close uffd", logger.WithSandboxID(sandboxId), zap.String("socket_path", u.socketPath), zap.Error(closeErr)) } }() - u.Ready <- struct{}{} + u.readyCh <- struct{}{} - err = Serve(int(uffd), setup.Mappings, u.memfile, u.exitReader.Fd(), u.Stop, sandboxId) + err = Serve( + int(uffd), + setup.Mappings, + u.memfile, + u.exitReader.Fd(), + u.Stop, + sandboxId, + ) if err != nil { return fmt.Errorf("failed handling uffd: %w", err) } return nil } + +func (u *Uffd) Stop() error { + return u.stopFn() +} + +func (u *Uffd) Ready() chan struct{} { + return u.readyCh +} + +func (u *Uffd) Exit() chan error { + return u.exitCh +} diff --git a/packages/orchestrator/internal/sandbox/uffd/memory_backend.go b/packages/orchestrator/internal/sandbox/uffd/memory_backend.go new file mode 100644 index 0000000..776c5b2 --- /dev/null +++ b/packages/orchestrator/internal/sandbox/uffd/memory_backend.go @@ -0,0 +1,13 @@ +package uffd + +import "github.com/bits-and-blooms/bitset" + +type MemoryBackend interface { + Disable() error + Dirty() *bitset.BitSet + + Start(sandboxId string) error + Stop() error + Ready() chan struct{} + Exit() chan error +} diff --git a/packages/orchestrator/internal/sandbox/uffd/noop.go b/packages/orchestrator/internal/sandbox/uffd/noop.go new file mode 100644 index 0000000..a52bd8f --- /dev/null +++ b/packages/orchestrator/internal/sandbox/uffd/noop.go @@ -0,0 +1,53 @@ +package uffd + +import ( + "github.com/bits-and-blooms/bitset" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" +) + +type NoopMemory struct { + size int64 + blockSize int64 + + dirty *bitset.BitSet +} + +func NewNoopMemory(size, blockSize int64) *NoopMemory { + blocks := header.TotalBlocks(size, blockSize) + + dirty := bitset.New(uint(blocks)) + dirty.FlipRange(0, dirty.Len()) + + return &NoopMemory{ + size: size, + blockSize: blockSize, + dirty: dirty, + } +} + +func (m *NoopMemory) Disable() error { + return nil +} + 
+func (m *NoopMemory) Dirty() *bitset.BitSet { + return m.dirty +} + +func (m *NoopMemory) Start(sandboxId string) error { + return nil +} + +func (m *NoopMemory) Stop() error { + return nil +} + +func (m *NoopMemory) Ready() chan struct{} { + // Buffered so the ready signal is queued; an unbuffered send here would deadlock. + ch := make(chan struct{}, 1) + ch <- struct{}{} + return ch +} + +func (m *NoopMemory) Exit() chan error { + return make(chan error) +} diff --git a/packages/orchestrator/internal/sandbox/uffd/serve.go b/packages/orchestrator/internal/sandbox/uffd/serve_linux.go similarity index 50% rename from packages/orchestrator/internal/sandbox/uffd/serve.go rename to packages/orchestrator/internal/sandbox/uffd/serve_linux.go index f9866f7..1cd2e15 100644 --- a/packages/orchestrator/internal/sandbox/uffd/serve.go +++ b/packages/orchestrator/internal/sandbox/uffd/serve_linux.go @@ -1,22 +1,21 @@ +//go:build linux +// +build linux + package uffd import ( "errors" "fmt" "syscall" - "time" "unsafe" "github.com/loopholelabs/userfaultfd-go/pkg/constants" + "go.uber.org/zap" "golang.org/x/sync/errgroup" "golang.org/x/sys/unix" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" -) - -const ( - maxEagainAttempts = 4096 - eagainDelay = 50 * time.Microsecond + "github.com/e2b-dev/infra/packages/shared/pkg/logger" ) var ErrUnexpectedEventType = errors.New("unexpected event type") @@ -30,7 +29,8 @@ type GuestRegionUffdMapping struct { func getMapping(addr uintptr, mappings []GuestRegionUffdMapping) (*GuestRegionUffdMapping, error) { for _, m := range mappings { - if !(addr >= m.BaseHostVirtAddr && addr < m.BaseHostVirtAddr+m.Size) { + if addr < m.BaseHostVirtAddr || m.BaseHostVirtAddr+m.Size <= addr { + // Outside the mapping continue } @@ -40,7 +40,14 @@ func getMapping(addr uintptr, mappings []GuestRegionUffdMapping) (*GuestRegionUf return nil, fmt.Errorf("address %d not found in any mapping", addr) } -func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceDevice, fd uintptr, stop func() error, sandboxId 
string) error { +func Serve( + uffd int, + mappings []GuestRegionUffdMapping, + src *block.TrackedSliceDevice, + fd uintptr, + stop func() error, + sandboxId string, +) error { pollFds := []unix.PollFd{ {Fd: int32(uffd), Events: unix.POLLIN}, {Fd: int32(fd), Events: unix.POLLIN}, @@ -48,15 +55,26 @@ func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceD var eg errgroup.Group +outerLoop: for { if _, err := unix.Poll( pollFds, -1, ); err != nil { if err == unix.EINTR { + zap.L().Debug("uffd: interrupted polling, going back to polling", logger.WithSandboxID(sandboxId)) + + continue + } + + if err == unix.EAGAIN { + zap.L().Debug("uffd: eagain during polling, going back to polling", logger.WithSandboxID(sandboxId)) + continue } + zap.L().Error("UFFD serve polling error", logger.WithSandboxID(sandboxId), zap.Error(err)) + return fmt.Errorf("failed polling: %w", err) } @@ -64,40 +82,60 @@ func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceD if exitFd.Revents&unix.POLLIN != 0 { errMsg := eg.Wait() if errMsg != nil { + zap.L().Warn("UFFD fd exit error while waiting for goroutines to finish", logger.WithSandboxID(sandboxId), zap.Error(errMsg)) + return fmt.Errorf("failed to handle uffd: %w", errMsg) } return nil } - buf := make([]byte, unsafe.Sizeof(constants.UffdMsg{})) + uffdFd := pollFds[0] + if uffdFd.Revents&unix.POLLIN == 0 { + // Uffd is not ready for reading as there is nothing to read on the fd. + // https://github.com/firecracker-microvm/firecracker/issues/5056 + // https://elixir.bootlin.com/linux/v6.8.12/source/fs/userfaultfd.c#L1149 + // TODO: Check for all the errors + // - https://docs.kernel.org/admin-guide/mm/userfaultfd.html + // - https://elixir.bootlin.com/linux/v6.8.12/source/fs/userfaultfd.c + // - https://man7.org/linux/man-pages/man2/userfaultfd.2.html + // It might be possible to just check for data != 0 in the syscall.Read loop + // but I don't feel confident about doing that. 
+ zap.L().Debug("uffd: no data in fd, going back to polling", logger.WithSandboxID(sandboxId)) - var i int + continue + } + + buf := make([]byte, unsafe.Sizeof(constants.UffdMsg{})) for { - _, err := syscall.Read(uffd, buf) + n, err := syscall.Read(uffd, buf) + if err == syscall.EINTR { + zap.L().Debug("uffd: interrupted read, reading again", logger.WithSandboxID(sandboxId)) + + continue + } + if err == nil { + // There is no error so we can proceed. break } if err == syscall.EAGAIN { - if i > maxEagainAttempts { - return fmt.Errorf("too many uffd read attempts, last error: %w\n", err) - } - - i++ + zap.L().Debug("uffd: eagain error, going back to polling", logger.WithSandboxID(sandboxId), zap.Error(err), zap.Int("read_bytes", n)) - time.Sleep(eagainDelay) - - continue + // Continue polling the fd. + continue outerLoop } + zap.L().Error("uffd: read error", logger.WithSandboxID(sandboxId), zap.Error(err)) + return fmt.Errorf("failed to read: %w", err) } msg := (*(*constants.UffdMsg)(unsafe.Pointer(&buf[0]))) if constants.GetMsgEvent(&msg) != constants.UFFD_EVENT_PAGEFAULT { - stop() + zap.L().Error("UFFD serve unexpected event type", logger.WithSandboxID(sandboxId), zap.Any("event_type", constants.GetMsgEvent(&msg))) return ErrUnexpectedEventType } @@ -109,7 +147,7 @@ func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceD mapping, err := getMapping(uintptr(addr), mappings) if err != nil { - stop() + zap.L().Error("UFFD serve get mapping error", logger.WithSandboxID(sandboxId), zap.Error(err)) return fmt.Errorf("failed to map: %w", err) } @@ -120,12 +158,18 @@ func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceD eg.Go(func() error { defer func() { if r := recover(); r != nil { + zap.L().Error("UFFD serve panic", logger.WithSandboxID(sandboxId), zap.Any("offset", offset), zap.Any("pagesize", pagesize), zap.Any("panic", r)) fmt.Printf("[sandbox %s]: recovered from panic in uffd serve (offset: %d, pagesize: %d): 
%v\n", sandboxId, offset, pagesize, r) } }() b, err := src.Slice(offset, pagesize) if err != nil { + + stop() + + zap.L().Error("UFFD serve slice error", logger.WithSandboxID(sandboxId), zap.Error(err)) + return fmt.Errorf("failed to read from source: %w", err) } @@ -144,12 +188,16 @@ func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceD uintptr(unsafe.Pointer(&cpy)), ); errno != 0 { if errno == unix.EEXIST { + zap.L().Debug("UFFD serve page already mapped", logger.WithSandboxID(sandboxId), zap.Any("offset", offset), zap.Any("pagesize", pagesize)) + // Page is already mapped return nil } stop() + zap.L().Error("UFFD serve uffdio copy error", logger.WithSandboxID(sandboxId), zap.Error(errno)) + return fmt.Errorf("failed uffdio copy %w", errno) } diff --git a/packages/orchestrator/internal/sandbox/uffd/serve_other.go b/packages/orchestrator/internal/sandbox/uffd/serve_other.go new file mode 100644 index 0000000..08b473c --- /dev/null +++ b/packages/orchestrator/internal/sandbox/uffd/serve_other.go @@ -0,0 +1,23 @@ +//go:build !linux +// +build !linux + +package uffd + +import ( + "errors" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" +) + +var ErrUnexpectedEventType = errors.New("unexpected event type") + +type GuestRegionUffdMapping struct { + BaseHostVirtAddr uintptr `json:"base_host_virt_addr"` + Size uintptr `json:"size"` + Offset uintptr `json:"offset"` + PageSize uintptr `json:"page_size_kib"` +} + +func Serve(uffd int, mappings []GuestRegionUffdMapping, src *block.TrackedSliceDevice, fd uintptr, stop func() error, sandboxId string) error { + return errors.New("platform does not support UFFD") +} diff --git a/packages/orchestrator/internal/server/main.go b/packages/orchestrator/internal/server/main.go index b7e1e97..4427a1a 100644 --- a/packages/orchestrator/internal/server/main.go +++ b/packages/orchestrator/internal/server/main.go @@ -2,207 +2,106 @@ package server import ( "context" - "errors" "fmt" - 
"log" - "math" - "net" - "os" "sync" - "github.com/e2b-dev/infra/packages/shared/pkg/env" - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging" - - grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" + "go.uber.org/zap" - "github.com/e2b-dev/infra/packages/orchestrator/internal/dns" + "github.com/e2b-dev/infra/packages/orchestrator/internal/grpcserver" + "github.com/e2b-dev/infra/packages/orchestrator/internal/proxy" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" - e2bgrpc "github.com/e2b-dev/infra/packages/shared/pkg/grpc" + "github.com/e2b-dev/infra/packages/orchestrator/internal/service" + featureflags "github.com/e2b-dev/infra/packages/shared/pkg/feature-flags" "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" - e2blogging "github.com/e2b-dev/infra/packages/shared/pkg/logging" "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" ) -const ServiceName = "orchestrator" - type server struct { orchestrator.UnimplementedSandboxServiceServer + + info *service.ServiceInfo sandboxes *smap.Map[*sandbox.Sandbox] - dns *dns.DNS + proxy *proxy.SandboxProxy tracer trace.Tracer networkPool *network.Pool templateCache *template.Cache - - pauseMu sync.Mutex + pauseMu sync.Mutex + devicePool *nbd.DevicePool + persistence 
storage.StorageProvider + featureFlags *featureflags.Client } type Service struct { + info *service.ServiceInfo server *server - grpc *grpc.Server - dns *dns.DNS - port uint16 + proxy *proxy.SandboxProxy shutdown struct { once sync.Once op func(context.Context) error err error } + + persistence storage.StorageProvider } -func New(ctx context.Context, port uint) (*Service, error) { - if port > math.MaxUint16 { - return nil, fmt.Errorf("%d is larger than maximum possible port %d", port, math.MaxInt16) - } - log.Printf("port finish") - srv := &Service{port: uint16(port)} - log.Printf("Service finish") - - log.Printf("Using GCS as storage provider") - if os.Getenv("TEMPLATE_BUCKET_NAME") == "" { - log.Printf("Warning: TEMPLATE_BUCKET_NAME environment variable is not set") - } else { - log.Printf("GCS configuration verified - using bucket: %s", - os.Getenv("TEMPLATE_BUCKET_NAME")) - } +func New( + ctx context.Context, + grpc *grpcserver.GRPCServer, + tel *telemetry.Client, + networkPool *network.Pool, + devicePool *nbd.DevicePool, + tracer trace.Tracer, + info *service.ServiceInfo, + proxy *proxy.SandboxProxy, + sandboxes *smap.Map[*sandbox.Sandbox], + featureFlags *featureflags.Client, +) (*Service, error) { + srv := &Service{info: info} templateCache, err := template.NewCache(ctx) if err != nil { return nil, fmt.Errorf("failed to create template cache: %w", err) } - log.Printf("templateCache finish") - networkPool, err := network.NewPool(ctx, network.NewSlotsPoolSize, network.ReusedSlotsPoolSize) - if err != nil { - return nil, fmt.Errorf("failed to create network pool: %w", err) - } + srv.proxy = proxy - log.Printf("networkPool finish") - - loggerSugar, err := e2blogging.New(env.IsLocal()) + persistence, err := storage.GetTemplateStorageProvider(ctx) if err != nil { - return nil, fmt.Errorf("initializing logger: %w", err) - } - - log.Printf("loggerSugar finish") - - logger := loggerSugar.Desugar() - - // BLOCK: initialize services - { - log.Printf("into dns") - 
srv.dns = dns.New() - - opts := []grpc_zap.Option{e2blogging.WithoutHealthCheck()} - - srv.grpc = grpc.NewServer( - grpc.StatsHandler(e2bgrpc.NewStatsWrapper(otelgrpc.NewServerHandler())), - grpc.ChainUnaryInterceptor( - recovery.UnaryServerInterceptor(), - grpc_zap.UnaryServerInterceptor(logger, opts...), - grpc_zap.PayloadUnaryServerInterceptor(logger, withoutHealthCheckPayload()), - ), - grpc.ChainStreamInterceptor( - grpc_zap.StreamServerInterceptor(logger, opts...), - grpc_zap.PayloadStreamServerInterceptor(logger, withoutHealthCheckPayload()), - ), - ) - log.Printf("grpc finish") - - srv.server = &server{ - tracer: otel.Tracer(ServiceName), - dns: srv.dns, - sandboxes: smap.New[*sandbox.Sandbox](), - networkPool: networkPool, - templateCache: templateCache, - } - - log.Printf("srv.server finish") + return nil, fmt.Errorf("failed to create storage provider: %w", err) } - orchestrator.RegisterSandboxServiceServer(srv.grpc, srv.server) - log.Printf("orchestrator.RegisterSandboxServiceServer finish") - grpc_health_v1.RegisterHealthServer(srv.grpc, health.NewServer()) - log.Printf("grpc_health_v1 finish") - - return srv, nil -} - -// Start launches -func (srv *Service) Start(context.Context) error { - if srv.server == nil || srv.dns == nil || srv.grpc == nil { - return errors.New("orchestrator services are not initialized") + srv.persistence = persistence + + srv.server = &server{ + info: info, + tracer: tracer, + proxy: srv.proxy, + sandboxes: sandboxes, + networkPool: networkPool, + templateCache: templateCache, + devicePool: devicePool, + persistence: persistence, + featureFlags: featureFlags, } - go func() { - log.Printf("Starting DNS server") - if err := srv.dns.Start("127.0.0.4", 53); err != nil { - log.Panic(fmt.Errorf("Failed running DNS server: %w", err)) - } - }() + meter := tel.MeterProvider.Meter("orchestrator.sandbox") + _, err = telemetry.GetObservableUpDownCounter(meter, telemetry.OrchestratorSandboxCountMeterName, func(ctx context.Context, observer 
metric.Int64Observer) error { + observer.Observe(int64(srv.server.sandboxes.Count())) - // the listener is closed by the shutdown operation - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", srv.port)) + return nil + }) if err != nil { - return fmt.Errorf("failed to listen on port %d: %w", srv.port, err) + zap.L().Error("Error registering sandbox count metric", zap.Any("metric_name", telemetry.OrchestratorSandboxCountMeterName), zap.Error(err)) } - log.Printf("starting server on port %d", srv.port) + orchestrator.RegisterSandboxServiceServer(grpc.GRPCServer(), srv.server) - go func() { - if err := srv.grpc.Serve(lis); err != nil { - log.Panic(fmt.Errorf("failed to serve: %w", err)) - } - }() - - srv.shutdown.op = func(ctx context.Context) error { - var errs []error - - srv.grpc.GracefulStop() - - if err := lis.Close(); err != nil { - errs = append(errs, err) - } - - if err := srv.dns.Close(ctx); err != nil { - errs = append(errs, err) - } - - return errors.Join(errs...) - } - - return nil -} - -func (srv *Service) Close(ctx context.Context) error { - srv.shutdown.once.Do(func() { - if srv.shutdown.op == nil { - // should only be true if there was an error - // during startup. 
- return - } - - srv.shutdown.err = srv.shutdown.op(ctx) - srv.shutdown.op = nil - }) - return srv.shutdown.err -} - -func withoutHealthCheckPayload() grpc_logging.ServerPayloadLoggingDecider { - return func(ctx context.Context, fullMethodName string, servingObject interface{}) bool { - // will not log gRPC calls if it was a call to healthcheck and no error was raised - if fullMethodName == "/grpc.health.v1.Health/Check" { - return false - } - - // by default everything will be logged - return true - } + return srv, nil } diff --git a/packages/orchestrator/internal/server/sandboxes.go b/packages/orchestrator/internal/server/sandboxes.go index 734c690..467c5a4 100644 --- a/packages/orchestrator/internal/server/sandboxes.go +++ b/packages/orchestrator/internal/server/sandboxes.go @@ -4,31 +4,30 @@ import ( "context" "errors" "fmt" - "log" - "os" - "sync" "time" + "github.com/google/uuid" + "github.com/launchdarkly/go-sdk-common/v3/ldcontext" "go.opentelemetry.io/otel/attribute" - "golang.org/x/sync/semaphore" + "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" - "github.com/e2b-dev/infra/packages/orchestrator/internal/consul" + "github.com/e2b-dev/infra/packages/orchestrator/internal/config" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/build" + featureflags "github.com/e2b-dev/infra/packages/shared/pkg/feature-flags" "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" - "github.com/e2b-dev/infra/packages/shared/pkg/logs" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" "github.com/e2b-dev/infra/packages/shared/pkg/storage" "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" ) const ( requestTimeout = 60 * time.Second - - maxParalellSnapshotting = 8 ) func 
(s *server) Create(ctxConn context.Context, req *orchestrator.SandboxCreateRequest) (*orchestrator.SandboxCreateResponse, error) { @@ -39,66 +38,86 @@ func (s *server) Create(ctxConn context.Context, req *orchestrator.SandboxCreate defer childSpan.End() childSpan.SetAttributes( - attribute.String("template.id", req.Sandbox.TemplateId), + telemetry.WithTemplateID(req.Sandbox.TemplateId), attribute.String("kernel.version", req.Sandbox.KernelVersion), - attribute.String("sandbox.id", req.Sandbox.SandboxId), - attribute.String("client.id", consul.ClientID), + telemetry.WithSandboxID(req.Sandbox.SandboxId), + attribute.String("client.id", s.info.ClientId), attribute.String("envd.version", req.Sandbox.EnvdVersion), ) - logger := logs.NewSandboxLogger( - req.Sandbox.SandboxId, - req.Sandbox.TemplateId, - req.Sandbox.TeamId, - req.Sandbox.Vcpu, - req.Sandbox.RamMb, - false, - ) + // TODO: Temporary workaround, remove API changes deployed + if req.Sandbox.GetExecutionId() == "" { + req.Sandbox.ExecutionId = uuid.New().String() + } + + flagCtx := ldcontext.NewBuilder(featureflags.MetricsWriteFlagName).SetString("sandbox_id", req.Sandbox.SandboxId).Build() + metricsWriteFlag, flagErr := s.featureFlags.Ld.BoolVariation(featureflags.MetricsWriteFlagName, flagCtx, featureflags.MetricsWriteDefault) + if flagErr != nil { + zap.L().Error("soft failing during metrics write feature flag receive", zap.Error(flagErr)) + } - sbx, cleanup, err := sandbox.NewSandbox( + sbx, cleanup, err := sandbox.ResumeSandbox( childCtx, s.tracer, - s.dns, s.networkPool, s.templateCache, req.Sandbox, childSpan.SpanContext().TraceID().String(), req.StartTime.AsTime(), req.EndTime.AsTime(), - logger, - req.Sandbox.Snapshot, req.Sandbox.BaseTemplateId, + s.devicePool, + config.AllowSandboxInternet, + metricsWriteFlag, ) if err != nil { - log.Printf("failed to create sandbox -> clean up: %v", err) - cleanupErr := cleanup.Run() + zap.L().Error("failed to create sandbox, cleaning up", zap.Error(err)) + 
cleanupErr := cleanup.Run(ctx)
 
-		errMsg := fmt.Errorf("failed to create sandbox: %w", errors.Join(err, context.Cause(ctx), cleanupErr))
-		telemetry.ReportCriticalError(ctx, errMsg)
+		err := errors.Join(err, context.Cause(ctx), cleanupErr)
+		telemetry.ReportCriticalError(ctx, "failed to create sandbox", err)
 
-		return nil, status.New(codes.Internal, errMsg.Error()).Err()
+		return nil, status.Errorf(codes.Internal, "failed to create sandbox: %s", err)
 	}
 
 	s.sandboxes.Insert(req.Sandbox.SandboxId, sbx)
 
 	go func() {
-		waitErr := sbx.Wait()
+		ctx, childSpan := s.tracer.Start(context.Background(), "sandbox-create-stop")
+		defer childSpan.End()
+
+		waitErr := sbx.Wait(ctx)
 		if waitErr != nil {
-			fmt.Fprintf(os.Stderr, "failed to wait for Sandbox: %v\n", waitErr)
+			sbxlogger.I(sbx).Error("failed to wait for sandbox, cleaning up", zap.Error(waitErr))
 		}
 
-		cleanupErr := cleanup.Run()
+		cleanupErr := cleanup.Run(ctx)
 		if cleanupErr != nil {
-			fmt.Fprintf(os.Stderr, "failed to cleanup Sandbox: %v\n", cleanupErr)
+			sbxlogger.I(sbx).Error("failed to cleanup sandbox, will remove from cache", zap.Error(cleanupErr))
 		}
 
-		s.sandboxes.Remove(req.Sandbox.SandboxId)
+		// Remove the sandbox from cache only if the cleanup IDs match.
+		// This prevents us from accidentally removing started sandbox (via resume) from the cache if cleanup is taking longer than the request timeout.
+		// This could have caused the "invisible" sandboxes that are not in orchestrator or API, but are still on client.
+		s.sandboxes.RemoveCb(req.Sandbox.SandboxId, func(_ string, v *sandbox.Sandbox, exists bool) bool {
+			if !exists {
+				return false
+			}
 
-		logger.Infof("Sandbox killed")
+			if v == nil {
+				return false
+			}
+
+			return sbx.Config.ExecutionId == v.Config.ExecutionId
+		})
+
+		// Remove the proxies assigned to the sandbox from the pool to prevent them from being reused.
+ s.proxy.RemoveFromPool(sbx.Config.ExecutionId) + + sbxlogger.E(sbx).Info("Sandbox killed") }() return &orchestrator.SandboxCreateResponse{ - ClientId: consul.ClientID, + ClientId: s.info.ClientId, }, nil } @@ -107,16 +126,15 @@ func (s *server) Update(ctx context.Context, req *orchestrator.SandboxUpdateRequ defer childSpan.End() childSpan.SetAttributes( - attribute.String("sandbox.id", req.SandboxId), - attribute.String("client.id", consul.ClientID), + telemetry.WithSandboxID(req.SandboxId), + attribute.String("client.id", s.info.ClientId), ) item, ok := s.sandboxes.Get(req.SandboxId) if !ok { - errMsg := fmt.Errorf("sandbox not found") - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "sandbox not found", nil) - return nil, status.New(codes.NotFound, errMsg.Error()).Err() + return nil, status.Error(codes.NotFound, "sandbox not found") } item.EndAt = req.EndTime.AsTime() @@ -143,7 +161,7 @@ func (s *server) List(ctx context.Context, _ *emptypb.Empty) (*orchestrator.Sand sandboxes = append(sandboxes, &orchestrator.RunningSandbox{ Config: sbx.Config, - ClientId: consul.ClientID, + ClientId: s.info.ClientId, StartTime: timestamppb.New(sbx.StartedAt), EndTime: timestamppb.New(sbx.EndAt), }) @@ -162,74 +180,50 @@ func (s *server) Delete(ctxConn context.Context, in *orchestrator.SandboxDeleteR defer childSpan.End() childSpan.SetAttributes( - attribute.String("sandbox.id", in.SandboxId), - attribute.String("client.id", consul.ClientID), + telemetry.WithSandboxID(in.SandboxId), + attribute.String("client.id", s.info.ClientId), ) sbx, ok := s.sandboxes.Get(in.SandboxId) if !ok { - errMsg := fmt.Errorf("sandbox '%s' not found", in.SandboxId) - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "sandbox not found", nil, telemetry.WithSandboxID(in.SandboxId)) - return nil, status.New(codes.NotFound, errMsg.Error()).Err() + return nil, status.Errorf(codes.NotFound, "sandbox '%s' not found", in.SandboxId) } - // 
Don't allow connecting to the sandbox anymore. - s.dns.Remove(in.SandboxId, sbx.Slot.HostIP()) - // Remove the sandbox from the cache to prevent loading it again in API during the time the instance is stopping. // Old comment: // Ensure the sandbox is removed from cache. // Ideally we would rely only on the goroutine defer. + // Don't allow connecting to the sandbox anymore. s.sandboxes.Remove(in.SandboxId) - loggingCtx, cancelLogginCtx := context.WithTimeout(ctx, 2*time.Second) - defer cancelLogginCtx() - // Check health metrics before stopping the sandbox - sbx.Healthcheck(loggingCtx, true) - sbx.LogMetrics(loggingCtx) + sbx.Checks.Healthcheck(true) - err := sbx.Stop() + err := sbx.Stop(ctx) if err != nil { - fmt.Fprintf(os.Stderr, "error stopping sandbox '%s': %v\n", in.SandboxId, err) + sbxlogger.I(sbx).Error("error stopping sandbox", logger.WithSandboxID(in.SandboxId), zap.Error(err)) } return &emptypb.Empty{}, nil } -var pauseQueue = semaphore.NewWeighted(maxParalellSnapshotting) - func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest) (*emptypb.Empty, error) { ctx, childSpan := s.tracer.Start(ctx, "sandbox-pause") defer childSpan.End() - err := pauseQueue.Acquire(ctx, 1) - if err != nil { - telemetry.ReportCriticalError(ctx, err) - - return nil, status.New(codes.ResourceExhausted, err.Error()).Err() - } - - releaseOnce := sync.OnceFunc(func() { - pauseQueue.Release(1) - }) - - defer releaseOnce() - s.pauseMu.Lock() sbx, ok := s.sandboxes.Get(in.SandboxId) if !ok { s.pauseMu.Unlock() - errMsg := fmt.Errorf("sandbox not found") - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "sandbox not found", nil) - return nil, status.New(codes.NotFound, errMsg.Error()).Err() + return nil, status.Error(codes.NotFound, "sandbox not found") } - s.dns.Remove(in.SandboxId, sbx.Slot.HostIP()) s.sandboxes.Remove(in.SandboxId) s.pauseMu.Unlock() @@ -239,40 +233,32 @@ func (s *server) Pause(ctx context.Context, in 
*orchestrator.SandboxPauseRequest in.BuildId, sbx.Config.KernelVersion, sbx.Config.FirecrackerVersion, - sbx.Config.HugePages, ).NewTemplateCacheFiles() if err != nil { - errMsg := fmt.Errorf("error creating template files: %w", err) - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "error creating template files", err) - return nil, status.New(codes.Internal, errMsg.Error()).Err() + return nil, status.Errorf(codes.Internal, "error creating template files: %s", err) } defer func() { // sbx.Stop sometimes blocks for several seconds, - // so we don't want to block the request and do the cleanup in a goroutine after we already removed sandbox from cache and DNS. + // so we don't want to block the request and do the cleanup in a goroutine after we already removed sandbox from cache and proxy. go func() { - err := sbx.Stop() + ctx, childSpan := s.tracer.Start(context.Background(), "sandbox-pause-stop") + defer childSpan.End() + + err := sbx.Stop(ctx) if err != nil { - fmt.Fprintf(os.Stderr, "error stopping sandbox after snapshot '%s': %v\n", in.SandboxId, err) + sbxlogger.I(sbx).Error("error stopping sandbox after snapshot", logger.WithSandboxID(in.SandboxId), zap.Error(err)) } }() }() - err = os.MkdirAll(snapshotTemplateFiles.CacheDir(), 0o755) - if err != nil { - errMsg := fmt.Errorf("error creating sandbox cache dir '%s': %w", snapshotTemplateFiles.CacheDir(), err) - telemetry.ReportCriticalError(ctx, errMsg) - - return nil, status.New(codes.Internal, errMsg.Error()).Err() - } - - snapshot, err := sbx.Snapshot(ctx, s.tracer, snapshotTemplateFiles, releaseOnce) + snapshot, err := sbx.Pause(ctx, s.tracer, snapshotTemplateFiles) if err != nil { - errMsg := fmt.Errorf("error snapshotting sandbox '%s': %w", in.SandboxId, err) - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "error snapshotting sandbox", err, telemetry.WithSandboxID(in.SandboxId)) - return nil, status.New(codes.Internal, errMsg.Error()).Err() 
+ return nil, status.Errorf(codes.Internal, "error snapshotting sandbox '%s': %s", in.SandboxId, err) } err = s.templateCache.AddSnapshot( @@ -280,7 +266,6 @@ func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest snapshotTemplateFiles.BuildId, snapshotTemplateFiles.KernelVersion, snapshotTemplateFiles.FirecrackerVersion, - snapshotTemplateFiles.Hugepages(), snapshot.MemfileDiffHeader, snapshot.RootfsDiffHeader, snapshot.Snapfile, @@ -288,10 +273,9 @@ func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest snapshot.RootfsDiff, ) if err != nil { - errMsg := fmt.Errorf("error adding snapshot to template cache: %w", err) - telemetry.ReportCriticalError(ctx, errMsg) + telemetry.ReportCriticalError(ctx, "error adding snapshot to template cache", err) - return nil, status.New(codes.Internal, errMsg.Error()).Err() + return nil, status.Errorf(codes.Internal, "error adding snapshot to template cache: %s", err) } telemetry.ReportEvent(ctx, "added snapshot to template cache") @@ -305,7 +289,7 @@ func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest default: memfileLocalPath, err := r.CachePath() if err != nil { - fmt.Fprintf(os.Stderr, "error getting memfile diff path: %v\n", err) + sbxlogger.I(sbx).Error("error getting memfile diff path", zap.Error(err)) return } @@ -321,7 +305,7 @@ func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest default: rootfsLocalPath, err := r.CachePath() if err != nil { - fmt.Fprintf(os.Stderr, "error getting rootfs diff path: %v\n", err) + sbxlogger.I(sbx).Error("error getting rootfs diff path", zap.Error(err)) return } @@ -332,17 +316,18 @@ func (s *server) Pause(ctx context.Context, in *orchestrator.SandboxPauseRequest b := storage.NewTemplateBuild( snapshot.MemfileDiffHeader, snapshot.RootfsDiffHeader, + s.persistence, snapshotTemplateFiles.TemplateFiles, ) err = <-b.Upload( context.Background(), - snapshotTemplateFiles.CacheSnapfilePath(), 
+ snapshot.Snapfile.Path(), memfilePath, rootfsPath, ) if err != nil { - fmt.Fprintf(os.Stderr, "error uploading sandbox snapshot '%s': %v\n", in.SandboxId, err) + sbxlogger.I(sbx).Error("error uploading sandbox snapshot", zap.Error(err)) return } diff --git a/packages/orchestrator/internal/server/sandboxes_test.go b/packages/orchestrator/internal/server/sandboxes_test.go new file mode 100644 index 0000000..a3a9d2c --- /dev/null +++ b/packages/orchestrator/internal/server/sandboxes_test.go @@ -0,0 +1,87 @@ +package server + +import ( + "context" + "reflect" + "testing" + "time" + + "go.opentelemetry.io/otel/trace/noop" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/service" + "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" +) + +var ( + startTime = time.Now() + endTime = time.Now().Add(time.Hour) +) + +func Test_server_List(t *testing.T) { + type args struct { + ctx context.Context + in1 *emptypb.Empty + } + tests := []struct { + name string + args args + want *orchestrator.SandboxListResponse + wantErr bool + data []*sandbox.Sandbox + }{ + { + name: "should return all sandboxes", + + args: args{ + ctx: context.Background(), + in1: &emptypb.Empty{}, + }, + data: []*sandbox.Sandbox{ + { + Metadata: &sandbox.Metadata{ + Config: &orchestrator.SandboxConfig{ + TemplateId: "template-id", + }, + StartedAt: startTime, + EndAt: endTime, + }, + }, + }, + want: &orchestrator.SandboxListResponse{ + Sandboxes: []*orchestrator.RunningSandbox{ + { + Config: &orchestrator.SandboxConfig{TemplateId: "template-id"}, + // ClientId: "client-id", + StartTime: timestamppb.New(startTime), + EndTime: timestamppb.New(endTime), + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
s := &server{ + sandboxes: smap.New[*sandbox.Sandbox](), + tracer: noop.NewTracerProvider().Tracer(""), + info: &service.ServiceInfo{}, + } + for _, sbx := range tt.data { + s.sandboxes.Insert(sbx.Config.SandboxId, sbx) + } + got, err := s.List(tt.args.ctx, tt.args.in1) + if (err != nil) != tt.wantErr { + t.Errorf("server.List() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("server.List() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/packages/orchestrator/internal/service/info.go b/packages/orchestrator/internal/service/info.go new file mode 100644 index 0000000..bf721a6 --- /dev/null +++ b/packages/orchestrator/internal/service/info.go @@ -0,0 +1,72 @@ +package service + +import ( + "sync" + "time" + + "github.com/google/uuid" + "go.uber.org/zap" + + orchestratorinfo "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator-info" +) + +type ServiceInfo struct { + ClientId string + ServiceId string + + SourceVersion string + SourceCommit string + + Startup time.Time + Roles []orchestratorinfo.ServiceInfoRole + + status orchestratorinfo.ServiceInfoStatus + statusMu sync.RWMutex +} + +var serviceRolesMapper = map[ServiceType]orchestratorinfo.ServiceInfoRole{ + Orchestrator: orchestratorinfo.ServiceInfoRole_Orchestrator, + TemplateManager: orchestratorinfo.ServiceInfoRole_TemplateBuilder, +} + +func (s *ServiceInfo) GetStatus() orchestratorinfo.ServiceInfoStatus { + s.statusMu.RLock() + defer s.statusMu.RUnlock() + + return s.status +} + +func (s *ServiceInfo) SetStatus(status orchestratorinfo.ServiceInfoStatus) { + s.statusMu.Lock() + defer s.statusMu.Unlock() + + if s.status != status { + zap.L().Info("Service status changed", zap.String("status", status.String())) + s.status = status + } +} + +func NewInfoContainer(clientId string, sourceVersion string, sourceCommit string) *ServiceInfo { + services := GetServices() + serviceRoles := make([]orchestratorinfo.ServiceInfoRole, 0) + + for _, 
service := range services { + if role, ok := serviceRolesMapper[service]; ok { + serviceRoles = append(serviceRoles, role) + } + } + + serviceInfo := &ServiceInfo{ + ClientId: clientId, + ServiceId: uuid.NewString(), + Startup: time.Now(), + Roles: serviceRoles, + + SourceVersion: sourceVersion, + SourceCommit: sourceCommit, + } + + serviceInfo.SetStatus(orchestratorinfo.ServiceInfoStatus_OrchestratorHealthy) + + return serviceInfo +} diff --git a/packages/orchestrator/internal/service/service.go b/packages/orchestrator/internal/service/service.go new file mode 100644 index 0000000..0729695 --- /dev/null +++ b/packages/orchestrator/internal/service/service.go @@ -0,0 +1,68 @@ +package service + +import ( + "strings" + + "github.com/e2b-dev/infra/packages/shared/pkg/env" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +type ServiceType string + +const ( + UnknownService ServiceType = "orch-unknown" + Orchestrator ServiceType = "orchestrator" + TemplateManager ServiceType = "template-manager" +) + +// ParseServiceType converts a string to a ServiceType. +// It is case-insensitive and defaults to UnknownService. +func ParseServiceType(s string) ServiceType { + switch strings.ToLower(strings.TrimSpace(s)) { + case string(Orchestrator): + return Orchestrator + case string(TemplateManager): + return TemplateManager + default: + return UnknownService + } +} + +// GetServices parses the ORCHESTRATOR_SERVICES environment variable +// and returns a slice of known ServiceTypes. +func GetServices() []ServiceType { + servicesEnv := env.GetEnv("ORCHESTRATOR_SERVICES", string(Orchestrator)) + rawServiceNames := strings.Split(servicesEnv, ",") + + var services []ServiceType + for _, name := range rawServiceNames { + service := ParseServiceType(name) + if service != UnknownService { + services = append(services, service) + } + } + + return services +} + +// GetServiceName returns a single string identifier for the given services. 
+// If multiple services are present, they are joined with underscores. +func GetServiceName(services []ServiceType) string { + if len(services) == 0 { + return string(UnknownService) + } + + var builder strings.Builder + for i, s := range services { + if i > 0 { + builder.WriteString("_") + } + builder.WriteString(string(s)) + } + + return builder.String() +} + +func GetClientID() string { + return utils.RequiredEnv("NODE_ID", "Nomad ID of the instance node") +} diff --git a/packages/orchestrator/internal/service/service_info.go b/packages/orchestrator/internal/service/service_info.go new file mode 100644 index 0000000..4723abb --- /dev/null +++ b/packages/orchestrator/internal/service/service_info.go @@ -0,0 +1,68 @@ +package service + +import ( + "context" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + orchestratorinfo "github.com/e2b-dev/infra/packages/shared/pkg/grpc/orchestrator-info" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" +) + +type Server struct { + orchestratorinfo.UnimplementedInfoServiceServer + + info *ServiceInfo + sandboxes *smap.Map[*sandbox.Sandbox] +} + +func NewInfoService(_ context.Context, grpc *grpc.Server, info *ServiceInfo, sandboxes *smap.Map[*sandbox.Sandbox]) *Server { + s := &Server{ + info: info, + sandboxes: sandboxes, + } + + orchestratorinfo.RegisterInfoServiceServer(grpc, s) + return s +} + +func (s *Server) ServiceInfo(_ context.Context, _ *emptypb.Empty) (*orchestratorinfo.ServiceInfoResponse, error) { + info := s.info + + metricVCpuUsed := int64(0) + metricMemoryUsedMb := int64(0) + metricDiskMb := int64(0) + + for _, item := range s.sandboxes.Items() { + metricVCpuUsed += item.Config.Vcpu + metricMemoryUsedMb += item.Config.RamMb + metricDiskMb += item.Config.TotalDiskSizeMb + } + + return &orchestratorinfo.ServiceInfoResponse{ + NodeId: 
info.ClientId, + ServiceId: info.ServiceId, + ServiceStatus: info.GetStatus(), + + ServiceVersion: info.SourceVersion, + ServiceCommit: info.SourceCommit, + + ServiceStartup: timestamppb.New(info.Startup), + ServiceRoles: info.Roles, + + MetricVcpuUsed: metricVCpuUsed, + MetricMemoryUsedMb: metricMemoryUsedMb, + MetricDiskMb: metricDiskMb, + MetricSandboxesRunning: int64(s.sandboxes.Count()), + }, nil +} + +func (s *Server) ServiceStatusOverride(_ context.Context, req *orchestratorinfo.ServiceStatusChangeRequest) (*emptypb.Empty, error) { + zap.L().Info("service status override request received", zap.String("status", req.ServiceStatus.String())) + s.info.SetStatus(req.ServiceStatus) + return &emptypb.Empty{}, nil +} diff --git a/packages/orchestrator/internal/template/build/build.go b/packages/orchestrator/internal/template/build/build.go new file mode 100644 index 0000000..01566f0 --- /dev/null +++ b/packages/orchestrator/internal/template/build/build.go @@ -0,0 +1,67 @@ +package build + +import ( + "context" + _ "embed" + "fmt" + "text/template" + + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/uuid" + "go.opentelemetry.io/otel/trace" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/block" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" +) + +//go:embed provision.sh +var provisionScriptFile string +var ProvisionScriptTemplate = template.Must(template.New("provisioning-script").Parse(provisionScriptFile)) + +//go:embed configure.sh +var configureScriptFile string +var ConfigureScriptTemplate = template.Must(template.New("provisioning-finish-script").Parse(configureScriptFile)) + +func Build( + ctx context.Context, + tracer trace.Tracer, + templateConfig *TemplateConfig, + postProcessor *writer.PostProcessor, + artifactRegistry artifactsregistry.ArtifactsRegistry, + templateBuildDir 
string, + rootfsPath string, +) (r *block.Local, m *block.Local, c containerregistry.Config, e error) { + childCtx, childSpan := tracer.Start(ctx, "template-build") + defer childSpan.End() + + // Create a rootfs file + rtfs := NewRootfs(artifactRegistry, templateConfig) + config, err := rtfs.createExt4Filesystem(childCtx, tracer, postProcessor, rootfsPath) + if err != nil { + return nil, nil, containerregistry.Config{}, fmt.Errorf("error creating rootfs for template '%s' during build '%s': %w", templateConfig.TemplateId, templateConfig.BuildId, err) + } + + buildIDParsed, err := uuid.Parse(templateConfig.BuildId) + if err != nil { + return nil, nil, containerregistry.Config{}, fmt.Errorf("failed to parse build id: %w", err) + } + + rootfs, err := block.NewLocal(rootfsPath, templateConfig.RootfsBlockSize(), buildIDParsed) + if err != nil { + return nil, nil, containerregistry.Config{}, fmt.Errorf("error reading rootfs blocks: %w", err) + } + + // Create empty memfile + memfilePath, err := NewMemory(templateBuildDir, templateConfig.MemoryMB) + if err != nil { + return nil, nil, containerregistry.Config{}, fmt.Errorf("error creating memfile: %w", err) + } + + memfile, err := block.NewLocal(memfilePath, templateConfig.MemfilePageSize(), buildIDParsed) + if err != nil { + return nil, nil, containerregistry.Config{}, fmt.Errorf("error creating memfile blocks: %w", err) + } + + return rootfs, memfile, config, nil +} diff --git a/packages/orchestrator/internal/template/build/command.go b/packages/orchestrator/internal/template/build/command.go new file mode 100644 index 0000000..50a3e86 --- /dev/null +++ b/packages/orchestrator/internal/template/build/command.go @@ -0,0 +1,141 @@ +package build + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "connectrpc.com/connect" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + "github.com/e2b-dev/infra/packages/shared/pkg/grpc" + 
"github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/process" + "github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/process/processconnect" +) + +const httpTimeout = 600 * time.Second + +func (b *TemplateBuilder) runCommand( + ctx context.Context, + postProcessor *writer.PostProcessor, + id string, + sandboxID string, + command string, + runAsUser string, + cwd *string, + envVars map[string]string, +) error { + return b.runCommandWithConfirmation( + ctx, + postProcessor, + id, + sandboxID, + command, + runAsUser, + cwd, + envVars, + // No confirmation needed for this command + make(chan struct{}), + ) +} + +func (b *TemplateBuilder) runCommandWithConfirmation( + ctx context.Context, + postProcessor *writer.PostProcessor, + id string, + sandboxID string, + command string, + runAsUser string, + cwd *string, + envVars map[string]string, + confirmCh chan<- struct{}, +) error { + runCmdReq := connect.NewRequest(&process.StartRequest{ + Process: &process.ProcessConfig{ + Cmd: "/bin/bash", + Cwd: cwd, + Args: []string{ + "-l", "-c", command, + }, + Envs: envVars, + }, + }) + + hc := http.Client{ + Timeout: httpTimeout, + } + proxyHost := fmt.Sprintf("http://localhost%s", b.proxy.GetAddr()) + processC := processconnect.NewProcessClient(&hc, proxyHost) + err := grpc.SetSandboxHeader(runCmdReq.Header(), proxyHost, sandboxID) + if err != nil { + return fmt.Errorf("failed to set sandbox header: %w", err) + } + grpc.SetUserHeader(runCmdReq.Header(), runAsUser) + + processCtx, processCancel := context.WithCancel(ctx) + defer processCancel() + commandStream, err := processC.Start(processCtx, runCmdReq) + // Confirm the command has executed before proceeding + close(confirmCh) + if err != nil { + return fmt.Errorf("error starting process: %w", err) + } + defer func() { + processCancel() + commandStream.Close() + }() + + msgCh, msgErrCh := grpc.StreamToChannel(ctx, commandStream) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-msgErrCh: + return 
err + case msg, ok := <-msgCh: + if !ok { + return nil + } + e := msg.Event + if e == nil { + zap.L().Error("received nil command event") + return nil + } + + switch { + case e.GetData() != nil: + data := e.GetData() + b.logStream(postProcessor, id, "stdout", string(data.GetStdout())) + b.logStream(postProcessor, id, "stderr", string(data.GetStderr())) + + case e.GetEnd() != nil: + end := e.GetEnd() + name := fmt.Sprintf("exit %d", end.GetExitCode()) + b.logStream(postProcessor, id, name, end.GetStatus()) + + if end.GetExitCode() != 0 { + return fmt.Errorf("command failed: %s", end.GetStatus()) + } + } + } + } +} + +func (b *TemplateBuilder) logStream(postProcessor *writer.PostProcessor, id string, name string, content string) { + if content == "" { + return + } + for _, line := range strings.Split(content, "\n") { + line = strings.TrimSpace(line) + if line == "" { + continue + } + msg := fmt.Sprintf("[%s] [%s]: %s", id, name, line) + postProcessor.WriteMsg(msg) + b.buildLogger.Info(msg) + } +} diff --git a/packages/orchestrator/internal/template/build/configure.sh b/packages/orchestrator/internal/template/build/configure.sh new file mode 100644 index 0000000..d2f36b6 --- /dev/null +++ b/packages/orchestrator/internal/template/build/configure.sh @@ -0,0 +1,41 @@ +#!/bin/bash +export BASH_XTRACEFD=1 +set -euo pipefail + +echo "Starting configuration script" + +echo "Enable swap" +echo 0 > /proc/sys/vm/swappiness +swapon /swap/swapfile + +# Create default user. +# if the /home/user directory exists, we copy the skeleton files to it because the adduser command +# will ignore the directory if it exists, but we want to include the skeleton files in the home directory +# in our case. 
+echo "Create default user 'user' (if doesn't exist yet)" +ADDUSER_OUTPUT=$(adduser -disabled-password --gecos "" user 2>&1 || true) +echo "$ADDUSER_OUTPUT" +if echo "$ADDUSER_OUTPUT" | grep -q "The home directory \`/home/user' already exists"; then + # Copy skeleton files if they don't exist in the home directory + echo "Copy skeleton files to /home/user" + cp -rn /etc/skel/. /home/user/ +fi + +echo "Add sudo to 'user' with no password" +usermod -aG sudo user +passwd -d user +echo "user ALL=(ALL:ALL) NOPASSWD: ALL" >>/etc/sudoers + +echo "Give 'user' ownership to /home/user" +mkdir -p /home/user +chown -R user:user /home/user + +echo "Give 777 permission to /usr/local" +chmod 777 -R /usr/local + +echo "Create /code directory" +mkdir -p /code +echo "Give 777 permission to /code" +chmod 777 -R /code + +echo "Finished configuration script" diff --git a/packages/orchestrator/internal/template/build/envd.go b/packages/orchestrator/internal/template/build/envd.go new file mode 100644 index 0000000..72bf4c6 --- /dev/null +++ b/packages/orchestrator/internal/template/build/envd.go @@ -0,0 +1,19 @@ +package build + +import ( + "context" + "fmt" + "os/exec" + "strings" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage" +) + +func GetEnvdVersion(ctx context.Context) (string, error) { + cmd := exec.Command(storage.HostEnvdPath, "-version") + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("error while getting envd version: %w", err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/packages/orchestrator/internal/template/build/ext4/tools.go b/packages/orchestrator/internal/template/build/ext4/tools.go new file mode 100644 index 0000000..4df3cd2 --- /dev/null +++ b/packages/orchestrator/internal/template/build/ext4/tools.go @@ -0,0 +1,334 @@ +package ext4 + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + 
"go.uber.org/zap" + "golang.org/x/sys/unix" + + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +const ( + // creates an inode for every bytes-per-inode byte of space on the disk + inodesRatio = int64(4096) + // Percentage of reserved blocks in the filesystem + reservedBlocksPercentage = int64(0) + + ToMBShift = 20 +) + +func Make(ctx context.Context, tracer trace.Tracer, rootfsPath string, sizeMb int64, blockSize int64) error { + ctx, tuneSpan := tracer.Start(ctx, "make-ext4") + defer tuneSpan.End() + + if blockSize < inodesRatio { + return fmt.Errorf("block size must be greater than inodes ratio") + } + + cmd := exec.CommandContext(ctx, + "mkfs.ext4", + // Matches the final ext4 features used by tar2ext4 tool + // But enables resize_inode, sparse_super (default, required for resize_inode) + "-O", `^has_journal,^dir_index,^64bit,^dir_nlink,^metadata_csum,ext_attr,sparse_super2,filetype,extent,flex_bg,large_file,huge_file,extra_isize`, + "-b", strconv.FormatInt(blockSize, 10), + "-m", strconv.FormatInt(reservedBlocksPercentage, 10), + "-i", strconv.FormatInt(inodesRatio, 10), + rootfsPath, + strconv.FormatInt(sizeMb, 10)+"M", + ) + + tuneStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = tuneStdoutWriter + + tuneStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = tuneStderrWriter + + return cmd.Run() +} + +func Mount(ctx context.Context, tracer trace.Tracer, rootfsPath string, mountPoint string) error { + ctx, mountSpan := tracer.Start(ctx, "mount-ext4") + defer mountSpan.End() + + cmd := exec.CommandContext(ctx, "mount", "-o", "loop", rootfsPath, mountPoint) + + mountStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = mountStdoutWriter + + mountStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = mountStderrWriter + + if err := cmd.Run(); err != nil { + return fmt.Errorf("error mounting ext4 filesystem: %w", err) + } + + return nil +} + +func Unmount(ctx context.Context, 
tracer trace.Tracer, rootfsPath string) error { + ctx, unmountSpan := tracer.Start(ctx, "unmount-ext4") + defer unmountSpan.End() + + cmd := exec.CommandContext(ctx, "umount", rootfsPath) + + unmountStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = unmountStdoutWriter + + unmountStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = unmountStderrWriter + + if err := cmd.Run(); err != nil { + return fmt.Errorf("error unmounting ext4 filesystem: %w", err) + } + + return nil +} + +func MakeWritable(ctx context.Context, tracer trace.Tracer, rootfsPath string) error { + ctx, tuneSpan := tracer.Start(ctx, "tune-ext4-writable") + defer tuneSpan.End() + + cmd := exec.CommandContext(ctx, "tune2fs", "-O ^read-only", rootfsPath) + + tuneStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = tuneStdoutWriter + + tuneStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = tuneStderrWriter + + return cmd.Run() +} + +func Enlarge(ctx context.Context, tracer trace.Tracer, rootfsPath string, addSize int64) (int64, error) { + ctx, resizeSpan := tracer.Start(ctx, "enlarge-ext4") + defer resizeSpan.End() + + stat, err := os.Stat(rootfsPath) + if err != nil { + return 0, fmt.Errorf("error stating rootfs file: %w", err) + } + finalSize := stat.Size() + addSize + + return Resize(ctx, tracer, rootfsPath, finalSize) +} + +func Resize(ctx context.Context, tracer trace.Tracer, rootfsPath string, targetSize int64) (int64, error) { + ctx, resizeSpan := tracer.Start(ctx, "resize-ext4") + defer resizeSpan.End() + + // Resize the ext4 filesystem + // The underlying file must be synced to the filesystem + cmd := exec.CommandContext(ctx, "resize2fs", rootfsPath, strconv.FormatInt(targetSize>>ToMBShift, 10)+"M") + resizeStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = resizeStdoutWriter + resizeStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = resizeStderrWriter + err := cmd.Run() + if err 
!= nil { + LogMetadata(rootfsPath) + return 0, fmt.Errorf("error resizing rootfs file: %w", err) + } + + stat, err := os.Stat(rootfsPath) + if err != nil { + return 0, fmt.Errorf("error stating rootfs file after resize: %w", err) + } + + return stat.Size(), err +} + +func Shrink(ctx context.Context, tracer trace.Tracer, rootfsPath string) (int64, error) { + ctx, resizeSpan := tracer.Start(ctx, "shrink-ext4") + defer resizeSpan.End() + + // Shrink the ext4 filesystem + // The underlying file must be synced to the filesystem + cmd := exec.CommandContext(ctx, "resize2fs", "-M", rootfsPath) + resizeStdoutWriter := telemetry.NewEventWriter(ctx, "stdout") + cmd.Stdout = resizeStdoutWriter + resizeStderrWriter := telemetry.NewEventWriter(ctx, "stderr") + cmd.Stderr = resizeStderrWriter + err := cmd.Run() + if err != nil { + LogMetadata(rootfsPath) + return 0, fmt.Errorf("error shrinking rootfs file: %w", err) + } + + stat, err := os.Stat(rootfsPath) + if err != nil { + return 0, fmt.Errorf("error stating rootfs file after resize: %w", err) + } + + return stat.Size(), err +} + +func GetFreeSpace(ctx context.Context, tracer trace.Tracer, rootfsPath string, blockSize int64) (int64, error) { + _, statSpan := tracer.Start(ctx, "stat-ext4-file") + defer statSpan.End() + + cmd := exec.Command("debugfs", "-R", "stats", rootfsPath) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + output := out.String() + if err != nil { + zap.L().Error("Error getting free space", zap.Error(err), zap.String("output", output)) + return 0, fmt.Errorf("error statting ext4: %w", err) + } + + // Extract block size and free blocks + freeBlocks, err := parseFreeBlocks(output) + if err != nil { + return 0, fmt.Errorf("could not parse free blocks: %w", err) + } + + reservedBlocks, err := parseReservedBlocks(output) + if err != nil { + return 0, fmt.Errorf("could not parse reserved blocks: %w", err) + } + + freeBytes := (freeBlocks - reservedBlocks) * blockSize + return freeBytes, nil +} + 
+func CheckIntegrity(rootfsPath string, fix bool) (string, error) { + LogMetadata(rootfsPath) + accExitCode := 0 + args := "-nfv" + if fix { + // 0 - No errors + // 1 - File system errors corrected + // 2 - File system errors corrected, a system should be rebooted + accExitCode = 2 + args = "-pfv" + } + cmd := exec.Command("e2fsck", args, rootfsPath) + out, err := cmd.CombinedOutput() + if err != nil { + exitCode := cmd.ProcessState.ExitCode() + + if exitCode > accExitCode { + return string(out), fmt.Errorf("error running e2fsck: %w", err) + } + } + + return strings.TrimSpace(string(out)), nil +} + +func ReadFile(ctx context.Context, tracer trace.Tracer, rootfsPath string, filePath string) (string, error) { + _, statSpan := tracer.Start(ctx, "ext4-read-file") + defer statSpan.End() + + cmd := exec.Command("debugfs", "-R", fmt.Sprintf("cat \"%s\"", filePath), rootfsPath) + out, err := cmd.Output() + if err != nil { + return "1", fmt.Errorf("error reading file: %w", err) + } + + return string(out), nil +} + +func RemoveFile(ctx context.Context, tracer trace.Tracer, rootfsPath string, filePath string) error { + _, statSpan := tracer.Start(ctx, "ext4-remove-file") + defer statSpan.End() + + // -w is used to open the filesystem in writable mode + cmd := exec.Command("debugfs", "-w", "-R", fmt.Sprintf("rm \"%s\"", filePath), rootfsPath) + out, err := cmd.CombinedOutput() + if err != nil { + zap.L().Error("error removing file", zap.Error(err), zap.String("output", string(out))) + return fmt.Errorf("error removing file: %w", err) + } + + return nil +} + +// MountOverlayFS mounts an overlay filesystem with the specified layers at the given mount point. +// It requires kernel version 6.8 or later to use the fsconfig interface for overlayfs. +// Older mount syscall is not used because it has lowerdirs character limit (4096 characters). 
+func MountOverlayFS(ctx context.Context, tracer trace.Tracer, layers []string, mountPoint string) error { + _, mountSpan := tracer.Start(ctx, "mount-overlay-fs", trace.WithAttributes( + attribute.String("mount", mountPoint), + attribute.StringSlice("layers", layers), + )) + defer mountSpan.End() + + // Open the filesystem for configuration + fsfd, err := unix.Fsopen("overlay", unix.FSOPEN_CLOEXEC) + if err != nil { + return fmt.Errorf("fsopen failed: %w", err) + } + defer unix.Close(fsfd) + + // Set lowerdir using FSCONFIG_SET_STRING + for _, layer := range layers { + // https://docs.kernel.org/filesystems/overlayfs.html + if err := unix.FsconfigSetString(fsfd, "lowerdir+", layer); err != nil { + return fmt.Errorf("fsconfig lowerdir failed: %w", err) + } + } + + // Finalize configuration + if err := unix.FsconfigCreate(fsfd); err != nil { + return fmt.Errorf("fsconfig create failed: %w", err) + } + + // Create the mount + mfd, err := unix.Fsmount(fsfd, 0, 0) + if err != nil { + return fmt.Errorf("fsmount failed: %w", err) + } + defer unix.Close(mfd) + + // Mount to target + if err := unix.MoveMount(mfd, "", -1, mountPoint, unix.MOVE_MOUNT_F_EMPTY_PATH); err != nil { + return fmt.Errorf("move mount failed: %w", err) + } + + return nil +} + +func LogMetadata(rootfsPath string, extraFields ...zap.Field) { + cmd := exec.Command("tune2fs", "-l", rootfsPath) + output, err := cmd.CombinedOutput() + + zap.L().With(extraFields...).Debug("tune2fs -l output", zap.String("path", rootfsPath), zap.String("output", string(output)), zap.Error(err)) +} + +// parseFreeBlocks extracts the "Free blocks:" value from debugfs output +func parseFreeBlocks(debugfsOutput string) (int64, error) { + re := regexp.MustCompile(`Free blocks:\s+(\d+)`) + matches := re.FindStringSubmatch(debugfsOutput) + if len(matches) < 2 { + return 0, fmt.Errorf("could not find free blocks in debugfs output") + } + freeBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 
fmt.Errorf("could not parse free blocks: %w", err) + } + return freeBlocks, nil +} + +// parseReservedBlocks extracts the "Reserved block count:" value from debugfs output +func parseReservedBlocks(debugfsOutput string) (int64, error) { + re := regexp.MustCompile(`Reserved block count:\s+(\d+)`) + matches := re.FindStringSubmatch(debugfsOutput) + if len(matches) < 2 { + return 0, fmt.Errorf("could not find reserved blocks in debugfs output") + } + reservedBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("could not parse reserved blocks: %w", err) + } + return reservedBlocks, nil +} diff --git a/packages/orchestrator/internal/template/build/memory.go b/packages/orchestrator/internal/template/build/memory.go new file mode 100644 index 0000000..d486c81 --- /dev/null +++ b/packages/orchestrator/internal/template/build/memory.go @@ -0,0 +1,34 @@ +package build + +import ( + "fmt" + "os" + "path/filepath" +) + +const ( + memoryBuildFileName = "memfile.build" +) + +func NewMemory(memoryBuildDir string, sizeMb int64) (string, error) { + emptyMemoryFilePath := filepath.Join(memoryBuildDir, memoryBuildFileName) + emptyMemoryFile, err := os.Create(emptyMemoryFilePath) + if err != nil { + return "", fmt.Errorf("error creating blank memfile: %w", err) + } + defer emptyMemoryFile.Close() + + err = emptyMemoryFile.Truncate(sizeMb << ToMBShift) + if err != nil { + return "", fmt.Errorf("error truncating blank memfile: %w", err) + } + + // Sync the metadata to disk. + // This is important to ensure that the file is fully written when used by other processes, like FC. 
+ err = emptyMemoryFile.Sync() + if err != nil { + return "", fmt.Errorf("error syncing blank memfile: %w", err) + } + + return emptyMemoryFilePath, nil +} diff --git a/packages/orchestrator/internal/template/build/oci/oci.go b/packages/orchestrator/internal/template/build/oci/oci.go new file mode 100644 index 0000000..6fb6ee7 --- /dev/null +++ b/packages/orchestrator/internal/template/build/oci/oci.go @@ -0,0 +1,314 @@ +package oci + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/archive" + "github.com/dustin/go-humanize" + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/ext4" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +const ( + ToMBShift = 20 + tarballExportUpdates = 10 +) + +func GetImage(ctx context.Context, tracer trace.Tracer, artifactRegistry artifactsregistry.ArtifactsRegistry, templateId string, buildId string) (containerregistry.Image, error) { + childCtx, childSpan := tracer.Start(ctx, "pull-docker-image") + defer childSpan.End() + + platform := containerregistry.Platform{ + OS: "linux", + Architecture: "amd64", + } + + img, err := artifactRegistry.GetImage(childCtx, templateId, buildId, platform) + if err != nil { + return nil, fmt.Errorf("error pulling image: %w", err) + } + + telemetry.ReportEvent(childCtx, "pulled image") + return img, nil +} + +func GetImageSize(img containerregistry.Image) (int64, error) { + imageSize := int64(0) + + layers, err := img.Layers() + if err != nil { + return 0, fmt.Errorf("error getting image 
layers: %w", err) + } + + for index, layer := range layers { + layerSize, err := layer.Size() + if err != nil { + return 0, fmt.Errorf("error getting layer (%d) size: %w", index, err) + } + imageSize += layerSize + } + + return imageSize, nil +} + +func ToExt4(ctx context.Context, tracer trace.Tracer, postProcessor *writer.PostProcessor, img containerregistry.Image, rootfsPath string, maxSize int64, blockSize int64) (int64, error) { + ctx, childSpan := tracer.Start(ctx, "oci-to-ext4") + defer childSpan.End() + + err := ext4.Make(ctx, tracer, rootfsPath, maxSize>>ToMBShift, blockSize) + if err != nil { + return 0, fmt.Errorf("error creating ext4 file: %w", err) + } + + err = ExtractToExt4(ctx, tracer, postProcessor, img, rootfsPath) + if err != nil { + return 0, fmt.Errorf("error extracting image to ext4 filesystem: %w", err) + } + + // Check the FS integrity first so no errors occur during shrinking + _, err = ext4.CheckIntegrity(rootfsPath, true) + if err != nil { + return 0, fmt.Errorf("error checking filesystem integrity after ext4 creation: %w", err) + } + + // The filesystem is first created with the maximum size, so we need to shrink it to the actual size + size, err := ext4.Shrink(ctx, tracer, rootfsPath) + if err != nil { + return 0, fmt.Errorf("error shrinking ext4 filesystem: %w", err) + } + + // Check the FS integrity after shrinking + _, err = ext4.CheckIntegrity(rootfsPath, true) + if err != nil { + return 0, fmt.Errorf("error checking filesystem integrity after shrinking: %w", err) + } + + return size, nil +} + +func ExtractToExt4(ctx context.Context, tracer trace.Tracer, postProcessor *writer.PostProcessor, img containerregistry.Image, rootfsPath string) error { + ctx, childSpan := tracer.Start(ctx, "extract-to-ext4") + defer childSpan.End() + + tmpMount, err := os.MkdirTemp("", "ext4-mount") + if err != nil { + return fmt.Errorf("error creating temporary mount point: %w", err) + } + defer func() { + if removeErr := os.RemoveAll(tmpMount); removeErr 
!= nil { + zap.L().Error("error removing temporary mount point", zap.Error(removeErr)) + } + }() + + err = ext4.Mount(ctx, tracer, rootfsPath, tmpMount) + if err != nil { + return fmt.Errorf("error mounting ext4 filesystem: %w", err) + } + defer func() { + if unmountErr := ext4.Unmount(ctx, tracer, tmpMount); unmountErr != nil { + zap.L().Error("error unmounting ext4 filesystem", zap.Error(unmountErr)) + } + }() + + zap.L().Debug("extracting image to ext4 filesystem", + zap.String("rootfs_path", rootfsPath), + zap.String("tmp_mount", tmpMount), + ) + + err = unpackRootfs(ctx, tracer, postProcessor, img, tmpMount) + if err != nil { + return fmt.Errorf("error extracting tar to directory: %w", err) + } + + return nil +} + +func ParseEnvs(envs []string) map[string]string { + envMap := make(map[string]string, len(envs)) + for _, env := range envs { + if strings.TrimSpace(env) == "" { + continue + } + parts := strings.SplitN(env, "=", 2) + if len(parts) != 2 { + continue + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + if key != "" && value != "" { + envMap[key] = value + } + } + return envMap +} + +func unpackRootfs(ctx context.Context, tracer trace.Tracer, postProcessor *writer.PostProcessor, srcImage containerregistry.Image, destDir string) (err error) { + ctx, childSpan := tracer.Start(ctx, "unpack-rootfs") + defer childSpan.End() + + ociPath, err := os.MkdirTemp("", "oci-image") + if err != nil { + return fmt.Errorf("while creating temporary file for squashed image: %w", err) + } + defer func() { + go os.RemoveAll(ociPath) + }() + + // Create export of layers in the temporary directory + layers, err := createExport(ctx, tracer, postProcessor, srcImage, ociPath) + if err != nil { + return fmt.Errorf("while creating export of source image: %w", err) + } + + // Mount the overlay filesystem with the extracted layers + mountPath, err := os.MkdirTemp("", "overlayfs-mount") + if err != nil { + return fmt.Errorf("while creating temporary 
file for squashed image: %w", err) + } + defer func() { + go os.RemoveAll(mountPath) + }() + + err = ext4.MountOverlayFS(ctx, tracer, layers, mountPath) + if err != nil { + return fmt.Errorf("while mounting overlayfs with layers: %w", err) + } + defer func() { + if unmountErr := ext4.Unmount(ctx, tracer, mountPath); unmountErr != nil { + zap.L().Error("error unmounting overlayfs mount point", zap.Error(unmountErr)) + } + }() + + // List files in the mount point + files, err := listFiles(ctx, tracer, mountPath) + if err != nil { + return fmt.Errorf("while listing files in overlayfs: %w", err) + } + postProcessor.WriteMsg("Root filesystem structure:") + postProcessor.WriteMsg(strings.Join(files, ", ")) + + // Copy files from the overlayfs mount point to the destination directory + err = copyFiles(ctx, tracer, mountPath, destDir) + if err != nil { + return fmt.Errorf("while copying files from overlayfs to destination directory: %w", err) + } + + return nil +} + +func listFiles(ctx context.Context, tracer trace.Tracer, dir string) ([]string, error) { + _, childSpan := tracer.Start(ctx, "list-files") + defer childSpan.End() + + files, err := os.ReadDir(dir) + if err != nil { + return nil, fmt.Errorf("while reading directory %s: %w", dir, err) + } + + return utils.Map(files, func(file os.DirEntry) string { + return file.Name() + }), nil +} + +// copyFiles uses rsync to copy files from the source directory to the destination directory. 
+func copyFiles(ctx context.Context, tracer trace.Tracer, src, dest string) error { + _, childSpan := tracer.Start(ctx, "copy-files") + defer childSpan.End() + + // Does the following: + // Recursion into directories + // Symlinks + // Permissions + // Modification times + // Group/owner (if possible) + // Device files and special files + // Hard links (-H) + // + // --whole-file: Copy files without using the delta algorithm, which is faster for local copies + // --inplace: Update destination files in place, no need to create temporary files + cmd := exec.Command("rsync", "-aH", "--whole-file", "--inplace", src+"/", dest) + if out, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("while copying files from %s to %s: %w: %s", src, dest, err, string(out)) + } + return nil +} + +// createExport extracts the layers of the source image into a temporary directory +// and returns the paths of the extracted layers. The layers are extracted in reverse order +// to maintain the correct order for overlayFS. +// The layers are extracted in parallel to speed up the process. 
+func createExport(ctx context.Context, tracer trace.Tracer, postProcessor *writer.PostProcessor, srcImage containerregistry.Image, path string) ([]string, error) { + ctx, childSpan := tracer.Start(ctx, "create-oci-export") + defer childSpan.End() + + layers, err := srcImage.Layers() + if err != nil { + return nil, fmt.Errorf("while getting layers of source image: %w", err) + } + + layerPaths := make([]string, len(layers)) + var eg errgroup.Group + for i, l := range layers { + digest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d: %w", i, err) + } + size, err := l.Size() + if err != nil { + return nil, fmt.Errorf("failed to get size of layer %d: %w", i, err) + } + telemetry.ReportEvent(ctx, "uncompressing layer", + attribute.Int("layer.index", i), + attribute.String("layer.digest", digest.String()), + attribute.Int64("layer.size", size), + ) + postProcessor.WriteMsg(fmt.Sprintf("Uncompressing layer %s %s", digest, humanize.Bytes(uint64(size)))) + + // Each layer has to be uniquely named, even if the digest is the same across different layers + layerPath := filepath.Join(path, fmt.Sprintf("layer-%d-%s", i, strings.ReplaceAll(digest.String(), ":", "-"))) + // Layers need to be reported in reverse order to maintain the correct layer order for overlayfs + layerPaths[len(layers)-i-1] = layerPath + eg.Go(func() error { + err := os.MkdirAll(layerPath, 0o755) + if err != nil { + return fmt.Errorf("failed to create directory for layer %d: %w", i, err) + } + + rc, err := l.Uncompressed() + if err != nil { + return fmt.Errorf("failed to get uncompressed layer %d: %w", i, err) + } + defer rc.Close() + + err = archive.Untar(rc, layerPath, &archive.TarOptions{ + IgnoreChownErrors: true, + }) + if err != nil { + return fmt.Errorf("failed to untar layer %d: %w", i, err) + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, fmt.Errorf("while extracting layers: %w", err) + } + + postProcessor.WriteMsg("Layers 
extracted") + + return layerPaths, nil +} diff --git a/packages/orchestrator/internal/template/build/oci/oci_test.go b/packages/orchestrator/internal/template/build/oci/oci_test.go new file mode 100644 index 0000000..973c87b --- /dev/null +++ b/packages/orchestrator/internal/template/build/oci/oci_test.go @@ -0,0 +1,88 @@ +package oci + +import ( + "archive/tar" + "bytes" + "io" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/trace/noop" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" +) + +func createFileTar(t *testing.T, fileName string) *bytes.Buffer { + t.Helper() + + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + // Add a file to the tarball + content := []byte("layer text") + err := tw.WriteHeader(&tar.Header{ + Name: fileName + ".txt", + Mode: 0o600, + Size: int64(len(content)), + }) + if err != nil { + t.Fatal(err) + } + if _, err := tw.Write(content); err != nil { + t.Fatal(err) + } + tw.Close() + + return &buf +} + +func TestCreateExportLayersOrder(t *testing.T) { + ctx := t.Context() + + tracer := noop.NewTracerProvider().Tracer("test") + postProcessor := writer.NewPostProcessor(ctx, io.Discard) + + // Create a dummy image with some layers + img := empty.Image + layer1, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(createFileTar(t, "layer0").Bytes())), nil + }) + require.NoError(t, err) + img, err = mutate.AppendLayers(img, layer1) + require.NoError(t, err) + + layer2, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(createFileTar(t, "layer1").Bytes())), nil + }) + require.NoError(t, err) + img, err = mutate.AppendLayers(img, layer2) + 
require.NoError(t, err) + + layer3, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(createFileTar(t, "layer2").Bytes())), nil + }) + require.NoError(t, err) + img, err = mutate.AppendLayers(img, layer3) + require.NoError(t, err) + + // Export the layers + dir := t.TempDir() + layers, err := createExport(ctx, tracer, postProcessor, img, dir) + require.NoError(t, err) + require.NotNil(t, layers) + + // Layers should be in reverse order + assert.Equal(t, 3, len(layers)) + assert.Regexp(t, "/layer-2.*", strings.TrimPrefix(layers[0], dir)) + assert.FileExists(t, filepath.Join(layers[0], "layer2.txt")) + assert.Regexp(t, "/layer-1.*", strings.TrimPrefix(layers[1], dir)) + assert.FileExists(t, filepath.Join(layers[1], "layer1.txt")) + assert.Regexp(t, "/layer-0.*", strings.TrimPrefix(layers[2], dir)) + assert.FileExists(t, filepath.Join(layers[2], "layer0.txt")) +} diff --git a/packages/orchestrator/internal/template/build/provision.sh b/packages/orchestrator/internal/template/build/provision.sh new file mode 100644 index 0000000..6b2491c --- /dev/null +++ b/packages/orchestrator/internal/template/build/provision.sh @@ -0,0 +1,110 @@ +#!/bin/bash +set -euo pipefail + +echo "Starting provisioning script" + +# fix: dpkg-statoverride: warning: --update given but /var/log/chrony does not exist +mkdir -p /var/log/chrony + +echo "Making configuration immutable" +chattr +i /etc/resolv.conf + +# Install required packages if not already installed +PACKAGES="systemd systemd-sysv openssh-server sudo chrony linuxptp" +echo "Checking presence of the following packages: $PACKAGES" + +MISSING=() +for pkg in $PACKAGES; do + if ! dpkg-query -W -f='${Status}' "$pkg" 2>/dev/null | grep -q "install ok installed"; then + echo "Package $pkg is missing, will install it." 
+ MISSING+=("$pkg") + fi +done + +if [ ${#MISSING[@]} -ne 0 ]; then + echo "Missing packages detected, installing: ${MISSING[*]}" + apt-get -qq update + DEBIAN_FRONTEND=noninteractive DEBCONF_NOWARNINGS=yes apt-get -qq -o=Dpkg::Use-Pty=0 install -y --no-install-recommends "${MISSING[@]}" +else + echo "All required packages are already installed." +fi + +echo "Setting up shell" +echo "export SHELL='/bin/bash'" >/etc/profile.d/shell.sh +echo "export PS1='\w \$ '" >/etc/profile.d/prompt.sh +echo "export PS1='\w \$ '" >>"/etc/profile" +echo "export PS1='\w \$ '" >>"/root/.bashrc" + +echo "Use .bashrc and .profile" +echo "if [ -f ~/.bashrc ]; then source ~/.bashrc; fi; if [ -f ~/.profile ]; then source ~/.profile; fi" >>/etc/profile + +echo "Remove root password" +passwd -d root + +# Set up chrony. +setup_chrony(){ + echo "Setting up chrony" + mkdir -p /etc/chrony + cat </etc/chrony/chrony.conf +refclock PHC /dev/ptp0 poll -1 dpoll -1 offset 0 trust prefer +makestep 1 -1 +EOF + + # Add a proxy config, as some environments expects it there (e.g. timemaster in Node Dockerimage) + echo "include /etc/chrony/chrony.conf" >/etc/chrony.conf + + mkdir -p /etc/systemd/system/chrony.service.d + # The ExecStart= should be emptying the ExecStart= line in config. 
+ cat <<EOF >/etc/systemd/system/chrony.service.d/override.conf +[Service] +ExecStart= +ExecStart=/usr/sbin/chronyd +User=root +Group=root +EOF +} + +setup_chrony + +echo "Setting up SSH" +mkdir -p /etc/ssh +cat <<EOF >>/etc/ssh/sshd_config +PermitRootLogin yes +PermitEmptyPasswords yes +PasswordAuthentication yes +EOF + +configure_swap() { + echo "Configuring swap to ${1} MiB" + mkdir /swap + fallocate -l "${1}"M /swap/swapfile + chmod 600 /swap/swapfile + mkswap /swap/swapfile +} + +configure_swap 128 + +echo "Don't wait for ttyS0 (serial console kernel logs)" +# This is required when the Firecracker kernel args have specified console=ttyS0 +systemctl mask serial-getty@ttyS0.service + +echo "Disable network online wait" +systemctl mask systemd-networkd-wait-online.service + +# Clean machine-id from Docker +rm -rf /etc/machine-id + +echo "Linking systemd to init" +ln -sf /lib/systemd/systemd /usr/sbin/init + +echo "Unlocking immutable configuration" +chattr -i /etc/resolv.conf + +echo "Finished provisioning script" + +# Delete itself +rm -rf /etc/init.d/rcS +rm -rf /usr/local/bin/provision.sh + +# Report successful provisioning +echo -n "0" > "{{ .ResultPath }}" \ No newline at end of file diff --git a/packages/orchestrator/internal/template/build/ready_command.go b/packages/orchestrator/internal/template/build/ready_command.go new file mode 100644 index 0000000..9026bbd --- /dev/null +++ b/packages/orchestrator/internal/template/build/ready_command.go @@ -0,0 +1,88 @@ +package build + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" +) + +const ( + defaultReadyWait = 20 * time.Second + + readyCommandRetryInterval = 2 * time.Second + readyCommandTimeout = 5 * time.Minute +) + +func (b *TemplateBuilder) runReadyCommand( + ctx context.Context, + postProcessor *writer.PostProcessor, + template *TemplateConfig, + sandboxID string, + envVars map[string]string, +) error { + ctx, span := 
b.tracer.Start(ctx, "run-ready-command") + defer span.End() + + postProcessor.WriteMsg("Waiting for template to be ready") + + if template.ReadyCmd == "" { + template.ReadyCmd = getDefaultReadyCommand(template) + } + postProcessor.WriteMsg(fmt.Sprintf("[ready cmd]: %s", template.ReadyCmd)) + + startTime := time.Now() + ctx, cancel := context.WithTimeout(ctx, readyCommandTimeout) + defer cancel() + + // Start the ready check + for { + cwd := "/home/user" + err := b.runCommand( + ctx, + postProcessor, + "ready", + sandboxID, + template.ReadyCmd, + "root", + &cwd, + envVars, + ) + + if err == nil { + postProcessor.WriteMsg("Template is ready") + return nil + } else { + postProcessor.WriteMsg(fmt.Sprintf("Template is not ready: %v", err)) + } + + select { + case <-ctx.Done(): + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + return fmt.Errorf("ready command timed out after %s", time.Since(startTime)) + } + // Template is ready, the start command finished before the ready command + postProcessor.WriteMsg("Template is ready") + return nil + case <-time.After(readyCommandRetryInterval): + // Wait for readyCommandRetryInterval time before retrying the ready command + } + } +} + +func getDefaultReadyCommand(template *TemplateConfig) string { + if template.StartCmd == "" { + return fmt.Sprintf("sleep %d", 0) + } + + // HACK: This is a temporary fix for a customer that needs a bigger time to start the command. + // TODO: Remove this after we can add customizable wait time for building templates. 
+	// TODO: Make this user configurable, with health check too
+	if template.TemplateId == "zegbt9dl3l2ixqem82mm" || template.TemplateId == "ot5bidkk3j2so2j02uuz" || template.TemplateId == "0zeou1s7agaytqitvmzc" {
+		return fmt.Sprintf("sleep %d", int((120 * time.Second).Seconds()))
+	}
+
+	return fmt.Sprintf("sleep %d", int(defaultReadyWait.Seconds()))
+}
diff --git a/packages/orchestrator/internal/template/build/rootfs.go b/packages/orchestrator/internal/template/build/rootfs.go
new file mode 100644
index 0000000..4b690fd
--- /dev/null
+++ b/packages/orchestrator/internal/template/build/rootfs.go
@@ -0,0 +1,293 @@
+package build
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"os"
+
+	"github.com/dustin/go-humanize"
+	containerregistry "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
+	"go.opentelemetry.io/otel/trace"
+	"go.uber.org/zap"
+
+	"github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/ext4"
+	"github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/oci"
+	"github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer"
+	artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry"
+	"github.com/e2b-dev/infra/packages/shared/pkg/storage"
+	"github.com/e2b-dev/infra/packages/shared/pkg/telemetry"
+)
+
+const (
+	ToMBShift = 20
+	// Maximum size of the rootfs file in bytes (15000 MB).
+	maxRootfsSize = 15000 << ToMBShift
+
+	rootfsBuildFileName  = "rootfs.ext4.build"
+	rootfsProvisionLink  = "rootfs.ext4.build.provision"
+
+	// provisionScriptResultPath is the path where the provision script stores its exit code.
+ provisionScriptResultPath = "/provision.result" + logExternalPrefix = "[external] " + + busyBoxBinaryPath = "/bin/busybox" + busyBoxInitPath = "usr/bin/init" + systemdInitPath = "/sbin/init" +) + +type Rootfs struct { + template *TemplateConfig + artifactRegistry artifactsregistry.ArtifactsRegistry +} + +type MultiWriter struct { + writers []io.Writer +} + +func (mw *MultiWriter) Write(p []byte) (int, error) { + for _, writer := range mw.writers { + _, err := writer.Write(p) + if err != nil { + return 0, err + } + } + + return len(p), nil +} + +func NewRootfs(artifactRegistry artifactsregistry.ArtifactsRegistry, template *TemplateConfig) *Rootfs { + return &Rootfs{ + template: template, + artifactRegistry: artifactRegistry, + } +} + +func (r *Rootfs) createExt4Filesystem(ctx context.Context, tracer trace.Tracer, postProcessor *writer.PostProcessor, rootfsPath string) (c containerregistry.Config, e error) { + childCtx, childSpan := tracer.Start(ctx, "create-ext4-file") + defer childSpan.End() + + defer func() { + if e != nil { + telemetry.ReportCriticalError(childCtx, "failed to create ext4 filesystem", e) + } + }() + + postProcessor.WriteMsg("Requesting Docker Image") + + img, err := oci.GetImage(childCtx, tracer, r.artifactRegistry, r.template.TemplateId, r.template.BuildId) + if err != nil { + return containerregistry.Config{}, fmt.Errorf("error requesting docker image: %w", err) + } + + imageSize, err := oci.GetImageSize(img) + if err != nil { + return containerregistry.Config{}, fmt.Errorf("error getting image size: %w", err) + } + postProcessor.WriteMsg(fmt.Sprintf("Docker image size: %s", humanize.Bytes(uint64(imageSize)))) + + postProcessor.WriteMsg("Setting up system files") + layers, err := additionalOCILayers(childCtx, r.template) + if err != nil { + return containerregistry.Config{}, fmt.Errorf("error populating filesystem: %w", err) + } + img, err = mutate.AppendLayers(img, layers...) 
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error appending layers: %w", err)
+	}
+	telemetry.ReportEvent(childCtx, "set up filesystem")
+
+	postProcessor.WriteMsg("Creating file system and pulling Docker image")
+	ext4Size, err := oci.ToExt4(ctx, tracer, postProcessor, img, rootfsPath, maxRootfsSize, r.template.RootfsBlockSize())
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error creating ext4 filesystem: %w", err)
+	}
+	r.template.rootfsSize = ext4Size
+	telemetry.ReportEvent(childCtx, "created rootfs ext4 file")
+
+	postProcessor.WriteMsg("Filesystem cleanup")
+	// Make rootfs writable, by default it's readonly
+	err = ext4.MakeWritable(ctx, tracer, rootfsPath)
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error making rootfs file writable: %w", err)
+	}
+
+	// Resize rootfs
+	rootfsFreeSpace, err := ext4.GetFreeSpace(ctx, tracer, rootfsPath, r.template.RootfsBlockSize())
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error getting free space: %w", err)
+	}
+	// We need to remove the remaining free space from the ext4 file size
+	// This is a residual space that could not be shrunk when creating the filesystem,
+	// but is still available for use
+	diskAdd := r.template.DiskSizeMB<<ToMBShift - (ext4Size - rootfsFreeSpace)
+	if diskAdd > 0 {
+		rootfsFinalSize, err := ext4.Enlarge(ctx, tracer, rootfsPath, diskAdd)
+		if err != nil {
+			return containerregistry.Config{}, fmt.Errorf("error enlarging rootfs: %w", err)
+		}
+		r.template.rootfsSize = rootfsFinalSize
+	}
+
+	// Check the rootfs filesystem corruption
+	ext4Check, err := ext4.CheckIntegrity(rootfsPath, true)
+	zap.L().Debug("filesystem ext4 integrity",
+		zap.String("result", ext4Check),
+		zap.Error(err),
+	)
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error checking ext4 filesystem integrity: %w", err)
+	}
+
+	config, err := img.ConfigFile()
+	if err != nil {
+		return containerregistry.Config{}, fmt.Errorf("error getting image config file: %w", err)
+	}
+
+ return config.Config, nil +} + +func additionalOCILayers( + ctx context.Context, + config *TemplateConfig, +) ([]containerregistry.Layer, error) { + var scriptDef bytes.Buffer + err := ProvisionScriptTemplate.Execute(&scriptDef, struct { + ResultPath string + }{ + ResultPath: provisionScriptResultPath, + }) + if err != nil { + return nil, fmt.Errorf("error executing provision script: %w", err) + } + telemetry.ReportEvent(ctx, "executed provision script env") + + memoryLimit := int(math.Min(float64(config.MemoryMB)/2, 512)) + envdService := fmt.Sprintf(`[Unit] +Description=Env Daemon Service +After=multi-user.target + +[Service] +Type=simple +Restart=always +User=root +Group=root +Environment=GOTRACEBACK=all +LimitCORE=infinity +ExecStart=/bin/bash -l -c "/usr/bin/envd" +OOMPolicy=continue +OOMScoreAdjust=-1000 +Environment="GOMEMLIMIT=%dMiB" + +[Install] +WantedBy=multi-user.target +`, memoryLimit) + + autologinService := `[Service] +ExecStart= +ExecStart=-/sbin/agetty --noissue --autologin root %I 115200,38400,9600 vt102 +` + + hostname := "e2b.local" + + hosts := fmt.Sprintf(`127.0.0.1 localhost +::1 localhost ip6-localhost ip6-loopback +fe00:: ip6-localnet +ff00:: ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters +127.0.1.1 %s +`, hostname) + + e2bFile := fmt.Sprintf(`ENV_ID=%s +BUILD_ID=%s +`, config.TemplateId, config.BuildId) + + envdFileData, err := os.ReadFile(storage.HostEnvdPath) + if err != nil { + return nil, fmt.Errorf("error reading envd file: %w", err) + } + + busyBox, err := os.ReadFile(busyBoxBinaryPath) + if err != nil { + return nil, fmt.Errorf("error reading busybox binary: %w", err) + } + + filesLayer, err := LayerFile( + map[string]layerFile{ + // Setup system + "etc/hostname": {[]byte(hostname), 0o644}, + "etc/hosts": {[]byte(hosts), 0o644}, + "etc/resolv.conf": {[]byte("nameserver 8.8.8.8"), 0o644}, + + ".e2b": {[]byte(e2bFile), 0o644}, + storage.GuestEnvdPath: {envdFileData, 0o777}, + "etc/systemd/system/envd.service": 
{[]byte(envdService), 0o644}, + "etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf": {[]byte(autologinService), 0o644}, + + // Provision script + "usr/local/bin/provision.sh": {scriptDef.Bytes(), 0o777}, + // Setup init system + "usr/bin/busybox": {busyBox, 0o755}, + // Set to bin/init so it's not in conflict with systemd + // Any rewrite of the init file when booted from it will corrupt the filesystem + busyBoxInitPath: {busyBox, 0o755}, + "etc/init.d/rcS": {[]byte(`#!/usr/bin/busybox ash +echo "Mounting essential filesystems" +# Ensure necessary mount points exist +mkdir -p /proc /sys /dev /tmp /run + +# Mount essential filesystems +mount -t proc proc /proc +mount -t sysfs sysfs /sys +mount -t devtmpfs devtmpfs /dev +mount -t tmpfs tmpfs /tmp +mount -t tmpfs tmpfs /run + +echo "System Init"`), 0o777}, + "etc/inittab": {[]byte(fmt.Sprintf(`# Run system init +::sysinit:/etc/init.d/rcS + +# Run the provision script, prefix the output with a log prefix +::wait:/bin/sh -c '/usr/local/bin/provision.sh 2>&1 | sed "s/^/%s/"' + +# Reboot the system after the script +# Running the poweroff or halt commands inside a Linux guest will bring it down but Firecracker process remains unaware of the guest shutdown so it lives on. +# Running the reboot command in a Linux guest will gracefully bring down the guest system and also bring a graceful end to the Firecracker process. 
+::once:/usr/bin/busybox reboot + +# Clean shutdown of filesystems and swap +::shutdown:/usr/bin/busybox swapoff -a +::shutdown:/usr/bin/busybox umount -a -r -v +`, logExternalPrefix)), 0o777}, + }, + ) + if err != nil { + return nil, fmt.Errorf("error creating layer from files: %w", err) + } + + symlinkLayer, err := LayerSymlink( + map[string]string{ + // Enable envd service autostart + "etc/systemd/system/multi-user.target.wants/envd.service": "etc/systemd/system/envd.service", + // Enable chrony service autostart + "etc/systemd/system/multi-user.target.wants/chrony.service": "etc/systemd/system/chrony.service", + }, + ) + if err != nil { + return nil, fmt.Errorf("error creating layer from symlinks: %w", err) + } + + return []containerregistry.Layer{ + filesLayer, + symlinkLayer, + }, nil +} diff --git a/packages/orchestrator/internal/template/build/tar.go b/packages/orchestrator/internal/template/build/tar.go new file mode 100644 index 0000000..4f1045a --- /dev/null +++ b/packages/orchestrator/internal/template/build/tar.go @@ -0,0 +1,83 @@ +package build + +import ( + "archive/tar" + "bytes" + "io" + "sort" + + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +type layerFile struct { + Bytes []byte + Mode int64 // Permission and mode bits +} + +// LayerFile creates a layer from a single file map. These layers are reproducible and consistent. +// A filemap is a path -> file content map representing a file system. 
+func LayerFile(filemap map[string]layerFile) (containerregistry.Layer, error) { + b := &bytes.Buffer{} + w := tar.NewWriter(b) + + names := []string{} + for f := range filemap { + names = append(names, f) + } + sort.Strings(names) + + for _, f := range names { + c := filemap[f] + if err := w.WriteHeader(&tar.Header{ + Name: f, + Size: int64(len(c.Bytes)), + Mode: c.Mode, + }); err != nil { + return nil, err + } + if _, err := w.Write(c.Bytes); err != nil { + return nil, err + } + } + if err := w.Close(); err != nil { + return nil, err + } + + // Return a new copy of the buffer each time it's opened. + return tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil + }) +} + +// LayerSymlink creates a layer from a single symlink map. These layers are reproducible and consistent. +func LayerSymlink(symlinks map[string]string) (containerregistry.Layer, error) { + b := &bytes.Buffer{} + w := tar.NewWriter(b) + + names := make([]string, 0, len(symlinks)) + for name := range symlinks { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + target := symlinks[name] + if err := w.WriteHeader(&tar.Header{ + Name: name, + Mode: 0o777, + Typeflag: tar.TypeSymlink, + Linkname: target, + }); err != nil { + return nil, err + } + } + + if err := w.Close(); err != nil { + return nil, err + } + + return tarball.LayerFromOpener(func() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil + }) +} diff --git a/packages/orchestrator/internal/template/build/template_builder.go b/packages/orchestrator/internal/template/build/template_builder.go new file mode 100644 index 0000000..2ea3b91 --- /dev/null +++ b/packages/orchestrator/internal/template/build/template_builder.go @@ -0,0 +1,549 @@ +package build + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + 
"golang.org/x/sync/errgroup" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/config" + "github.com/e2b-dev/infra/packages/orchestrator/internal/proxy" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/fc" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" + templatelocal "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/template" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/ext4" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/oci" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/template" + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" + "github.com/e2b-dev/infra/packages/shared/pkg/env" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +type TemplateBuilder struct { + logger *zap.Logger + tracer trace.Tracer + + storage storage.StorageProvider + devicePool *nbd.DevicePool + networkPool *network.Pool + buildLogger *zap.Logger + templateStorage *template.Storage + artifactRegistry artifactsregistry.ArtifactsRegistry + proxy *proxy.SandboxProxy + sandboxes *smap.Map[*sandbox.Sandbox] +} + +const ( + templatesDirectory = "/tmp/build-templates" + + sbxTimeout = time.Hour + provisionTimeout = 5 * time.Minute + configurationTimeout = 5 * time.Minute + waitEnvdTimeout = 60 * time.Second + + cleanupTimeout = time.Second * 10 +) + +func NewBuilder( + logger *zap.Logger, + buildLogger *zap.Logger, + tracer trace.Tracer, + templateStorage *template.Storage, + storage storage.StorageProvider, + 
artifactRegistry artifactsregistry.ArtifactsRegistry, + devicePool *nbd.DevicePool, + networkPool *network.Pool, + proxy *proxy.SandboxProxy, + sandboxes *smap.Map[*sandbox.Sandbox], +) *TemplateBuilder { + return &TemplateBuilder{ + logger: logger, + tracer: tracer, + buildLogger: buildLogger, + templateStorage: templateStorage, + storage: storage, + artifactRegistry: artifactRegistry, + devicePool: devicePool, + networkPool: networkPool, + proxy: proxy, + sandboxes: sandboxes, + } +} + +type Result struct { + EnvdVersion string + RootfsSizeMB int64 +} + +// Build builds the template, uploads it to storage and returns the result metadata. +// It works the following: +// 1. Get docker image from the remote repository +// 2. Inject new file layers with the required setup for hostname, dns, envd service configuration, basic provisioning script that is run before most of VM services +// 3. Extract ext4 filesystem +// 4. Start FC VM with BusyBox init that runs just the provisioning script, wait for exit. This will install systemd, that is later used for proper VM boot. +// 5. Start the FC VM (using systemd) and wait for Envd +// 5. Run two additional commands: +// - configuration script (enable swap, create user, change folder permissions, etc.) +// - start command (if defined), together with the ready command (always with default value if not defined) +// +// 6. Snapshot +// 7. 
Upload template +func (b *TemplateBuilder) Build(ctx context.Context, template *TemplateConfig) (r *Result, e error) { + ctx, childSpan := b.tracer.Start(ctx, "build") + defer childSpan.End() + + logsWriter := template.BuildLogsWriter + postProcessor := writer.NewPostProcessor(ctx, logsWriter) + go postProcessor.Start() + defer func() { + postProcessor.Stop(e) + }() + + envdVersion, err := GetEnvdVersion(ctx) + if err != nil { + return nil, fmt.Errorf("error getting envd version: %w", err) + } + + templateCacheFiles, err := template.NewTemplateCacheFiles() + if err != nil { + return nil, fmt.Errorf("error creating template files: %w", err) + } + + templateBuildDir := filepath.Join(templatesDirectory, template.BuildId) + err = os.MkdirAll(templateBuildDir, 0o777) + if err != nil { + return nil, fmt.Errorf("error creating template build directory: %w", err) + } + defer func() { + err := os.RemoveAll(templateBuildDir) + if err != nil { + b.logger.Error("Error while removing template build directory", zap.Error(err)) + } + }() + + // Created here to be able to pass it to CreateSandbox for populating COW cache + rootfsPath := filepath.Join(templateBuildDir, rootfsBuildFileName) + + rootfs, memfile, buildConfig, err := Build( + ctx, + b.tracer, + template, + postProcessor, + b.artifactRegistry, + templateBuildDir, + rootfsPath, + ) + if err != nil { + return nil, fmt.Errorf("error building environment: %w", err) + } + + localTemplate := templatelocal.NewLocalTemplate(templateCacheFiles, rootfs, memfile) + defer localTemplate.Close() + + // Provision sandbox with systemd and other vital parts + postProcessor.WriteMsg("Provisioning sandbox template") + // Just a symlink to the rootfs build file, so when the COW cache deletes the underlying file (here symlink), + // it will not delete the rootfs file. We use the rootfs again later on to start the sandbox template. 
+ rootfsProvisionPath := filepath.Join(templateBuildDir, rootfsProvisionLink) + err = os.Symlink(rootfsPath, rootfsProvisionPath) + if err != nil { + return nil, fmt.Errorf("error creating provision rootfs: %w", err) + } + + err = b.provisionSandbox(ctx, postProcessor, template, envdVersion, localTemplate, rootfsProvisionPath) + if err != nil { + return nil, fmt.Errorf("error provisioning sandbox: %w", err) + } + + // Check the rootfs filesystem corruption + ext4Check, err := ext4.CheckIntegrity(rootfsPath, true) + if err != nil { + zap.L().Error("provisioned filesystem ext4 integrity", + zap.String("result", ext4Check), + zap.Error(err), + ) + return nil, fmt.Errorf("error checking provisioned filesystem integrity: %w", err) + } + zap.L().Debug("provisioned filesystem ext4 integrity", + zap.String("result", ext4Check), + ) + + err = b.enlargeDiskAfterProvisioning(ctx, template, rootfsPath) + if err != nil { + return nil, fmt.Errorf("error enlarging disk after provisioning: %w", err) + } + + err = rootfs.UpdateSize() + if err != nil { + return nil, fmt.Errorf("error updating rootfs size: %w", err) + } + + // Create sandbox for building template + postProcessor.WriteMsg("Creating sandbox template") + sbx, cleanup, err := sandbox.CreateSandbox( + ctx, + b.tracer, + b.networkPool, + b.devicePool, + template.ToSandboxConfig(envdVersion), + localTemplate, + sbxTimeout, + rootfsPath, + fc.ProcessOptions{ + InitScriptPath: systemdInitPath, + KernelLogs: env.IsDevelopment(), + SystemdToKernelLogs: false, + }, + config.AllowSandboxInternet, + ) + defer func() { + cleanupErr := cleanup.Run(ctx) + if cleanupErr != nil { + b.logger.Error("Error cleaning up sandbox", zap.Error(cleanupErr)) + } + }() + if err != nil { + return nil, fmt.Errorf("error creating sandbox: %w", err) + } + err = sbx.WaitForEnvd( + ctx, + b.tracer, + waitEnvdTimeout, + ) + if err != nil { + return nil, fmt.Errorf("failed to wait for sandbox start: %w", err) + } + // Add to proxy so we can call envd 
commands + b.sandboxes.Insert(sbx.Metadata.Config.SandboxId, sbx) + defer func() { + b.sandboxes.Remove(sbx.Metadata.Config.SandboxId) + b.proxy.RemoveFromPool(sbx.Metadata.Config.ExecutionId) + }() + + // Run configuration script + var scriptDef bytes.Buffer + err = ConfigureScriptTemplate.Execute(&scriptDef, map[string]string{}) + if err != nil { + return nil, fmt.Errorf("error executing provision script: %w", err) + } + + configCtx, configCancel := context.WithTimeout(ctx, configurationTimeout) + defer configCancel() + err = b.runCommand( + configCtx, + postProcessor, + "config", + sbx.Metadata.Config.SandboxId, + scriptDef.String(), + "root", + nil, + map[string]string{}, + ) + if err != nil { + return nil, fmt.Errorf("error running configuration script: %w", err) + } + + // Env variables for the start command and ready command + envVars := oci.ParseEnvs(buildConfig.Env) + + // Start command + commandsCtx, commandsCancel := context.WithCancel(ctx) + defer commandsCancel() + + var startCmd errgroup.Group + startCmdConfirm := make(chan struct{}) + if template.StartCmd != "" { + postProcessor.WriteMsg("Running start command") + startCmd.Go(func() error { + cwd := "/home/user" + err := b.runCommandWithConfirmation( + commandsCtx, + postProcessor, + "start", + sbx.Metadata.Config.SandboxId, + template.StartCmd, + "root", + &cwd, + envVars, + startCmdConfirm, + ) + // If the ctx is canceled, the ready command succeeded and no start command await is necessary. + if err != nil && !errors.Is(err, context.Canceled) { + // Cancel the ready command context, so the ready command does not wait anymore if an error occurs. + commandsCancel() + return fmt.Errorf("error running start command: %w", err) + } + + return nil + }) + } else { + // If no start command is defined, we still need to confirm that the start command has started. 
+ close(startCmdConfirm) + } + + // Ready command + err = b.runReadyCommand( + commandsCtx, + postProcessor, + template, + sbx.Metadata.Config.SandboxId, + envVars, + ) + if err != nil { + return nil, fmt.Errorf("error running ready command: %w", err) + } + + // Wait for the start command to start executing. + select { + case <-ctx.Done(): + return nil, fmt.Errorf("error waiting for start command: %w", commandsCtx.Err()) + case <-startCmdConfirm: + } + // Cancel the start command context (it's running in the background anyway). + // If it has already finished, check the error. + commandsCancel() + err = startCmd.Wait() + if err != nil { + return nil, fmt.Errorf("error running start command: %w", err) + } + + // Pause sandbox + postProcessor.WriteMsg("Pausing sandbox template") + snapshot, err := sbx.Pause( + ctx, + b.tracer, + templateCacheFiles, + ) + if err != nil { + return nil, fmt.Errorf("error processing vm: %w", err) + } + + // Upload + postProcessor.WriteMsg("Uploading template") + uploadErrCh := b.uploadTemplate( + ctx, + template.TemplateFiles, + snapshot, + ) + + uploadErr := <-uploadErrCh + if uploadErr != nil { + return nil, fmt.Errorf("error uploading template: %w", uploadErr) + } + + return &Result{ + EnvdVersion: envdVersion, + RootfsSizeMB: template.RootfsSizeMB(), + }, nil +} + +func (b *TemplateBuilder) uploadTemplate( + ctx context.Context, + templateFiles *storage.TemplateFiles, + snapshot *sandbox.Snapshot, +) chan error { + errCh := make(chan error, 1) + + go func() { + // Remove build files if build fails or times out + var err error + defer func() { + if err != nil { + removeCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) + defer cancel() + + removeErr := b.templateStorage.Remove(removeCtx, templateFiles.BuildId) + if removeErr != nil { + telemetry.ReportError(ctx, "error while removing build files", removeErr) + } + } + }() + defer func() { + err := snapshot.Close(ctx) + if err != nil { + zap.L().Error("error 
closing snapshot", zap.Error(err), logger.WithBuildID(templateFiles.BuildId), logger.WithTemplateID(templateFiles.TemplateId)) + } + }() + defer close(errCh) + + templateBuild := storage.NewTemplateBuild( + snapshot.MemfileDiffHeader, + snapshot.RootfsDiffHeader, + b.storage, + templateFiles, + ) + + memfileDiffPath, err := snapshot.MemfileDiff.CachePath() + if err != nil { + errCh <- fmt.Errorf("error getting memfile diff path: %w", err) + return + } + + rootfsDiffPath, err := snapshot.RootfsDiff.CachePath() + if err != nil { + errCh <- fmt.Errorf("error getting rootfs diff path: %w", err) + return + } + + snapfilePath := snapshot.Snapfile.Path() + + uploadErrCh := templateBuild.Upload( + ctx, + snapfilePath, + &memfileDiffPath, + &rootfsDiffPath, + ) + + // Wait for the upload to finish + err = <-uploadErrCh + if err != nil { + errCh <- fmt.Errorf("error uploading template build: %w", err) + return + } + + errCh <- nil + }() + + return errCh +} + +func (b *TemplateBuilder) provisionSandbox( + ctx context.Context, + postProcessor *writer.PostProcessor, + template *TemplateConfig, + envdVersion string, + localTemplate *templatelocal.LocalTemplate, + rootfsPath string, +) (e error) { + ctx, childSpan := b.tracer.Start(ctx, "provision-sandbox") + defer childSpan.End() + + logsWriter := &writer.PrefixFilteredWriter{Writer: postProcessor, PrefixFilter: logExternalPrefix} + defer logsWriter.Close() + + sbx, cleanup, err := sandbox.CreateSandbox( + ctx, + b.tracer, + b.networkPool, + b.devicePool, + template.ToSandboxConfig(envdVersion), + localTemplate, + provisionTimeout, + rootfsPath, + fc.ProcessOptions{ + InitScriptPath: busyBoxInitPath, + // Always show kernel logs during the provisioning phase, + // the sandbox is then started with systemd and without kernel logs. 
+ KernelLogs: true, + + // Show provision script logs to the user + Stdout: logsWriter, + Stderr: logsWriter, + }, + // Allow sandbox internet access during provisioning + true, + ) + defer func() { + cleanupErr := cleanup.Run(ctx) + if cleanupErr != nil { + e = fmt.Errorf("error cleaning up sandbox: %w", cleanupErr) + } + }() + if err != nil { + return fmt.Errorf("error creating sandbox: %w", err) + } + err = sbx.WaitForExit( + ctx, + b.tracer, + ) + if err != nil { + return fmt.Errorf("failed to wait for sandbox start: %w", err) + } + + // Verify the provisioning script exit status + exitStatus, err := ext4.ReadFile(ctx, b.tracer, rootfsPath, provisionScriptResultPath) + if err != nil { + return fmt.Errorf("error reading provision result: %w", err) + } + defer ext4.RemoveFile(ctx, b.tracer, rootfsPath, provisionScriptResultPath) + + // Fallback to "1" if the file is empty or not found + if exitStatus == "" { + exitStatus = "1" + } + if exitStatus != "0" { + return fmt.Errorf("provision script failed with exit status: %s", exitStatus) + } + + return nil +} + +func (b *TemplateBuilder) enlargeDiskAfterProvisioning( + ctx context.Context, + template *TemplateConfig, + rootfsPath string, +) error { + // Resize rootfs to accommodate for the provisioning script size change + rootfsFreeSpace, err := ext4.GetFreeSpace(ctx, b.tracer, rootfsPath, template.RootfsBlockSize()) + if err != nil { + return fmt.Errorf("error getting free space: %w", err) + } + sizeDiff := template.DiskSizeMB<> 20 +} + +func (e *TemplateConfig) MemfilePageSize() int64 { + if e.HugePages { + return header.HugepageSize + } + + return header.PageSize +} + +func (e *TemplateConfig) RootfsBlockSize() int64 { + return header.RootfsBlockSize +} + +func (e *TemplateConfig) ToSandboxConfig(envdVersion string) *orchestrator.SandboxConfig { + return &orchestrator.SandboxConfig{ + TemplateId: e.TemplateId, + BuildId: e.BuildId, + KernelVersion: e.KernelVersion, + FirecrackerVersion: e.FirecrackerVersion, + 
HugePages: e.HugePages, + SandboxId: instanceBuildPrefix + id.Generate(), + ExecutionId: uuid.New().String(), + EnvdVersion: envdVersion, + Vcpu: e.VCpuCount, + RamMb: e.MemoryMB, + + BaseTemplateId: e.TemplateId, + } +} diff --git a/packages/orchestrator/internal/template/build/writer/filtered_writer.go b/packages/orchestrator/internal/template/build/writer/filtered_writer.go new file mode 100644 index 0000000..9359bc4 --- /dev/null +++ b/packages/orchestrator/internal/template/build/writer/filtered_writer.go @@ -0,0 +1,83 @@ +package writer + +import ( + "bytes" + "io" + "strings" +) + +type PrefixFilteredWriter struct { + io.Writer + PrefixFilter string + buff bytes.Buffer +} + +// Write will split the input on newlines and post each line as a new log entry +// to the logger. +func (w *PrefixFilteredWriter) Write(bs []byte) (n int, err error) { + n = len(bs) + for len(bs) > 0 { + bs = w.writeLine(bs) + } + + return n, nil +} + +// writeLine writes a single line from the input, returning the remaining, +// unconsumed bytes. +func (w *PrefixFilteredWriter) writeLine(line []byte) (remaining []byte) { + idx := bytes.IndexByte(line, '\n') + if idx < 0 { + // If there are no newlines, buffer the entire string. + w.buff.Write(line) + return nil + } + + // Split on the newline, buffer and flush the left. + line, remaining = line[:idx], line[idx+1:] + + // Fast path: if we don't have a partial message from a previous write + // in the buffer, skip the buffer and log directly. + if w.buff.Len() == 0 { + w.log(line) + return + } + + w.buff.Write(line) + + // Log empty messages in the middle of the stream so that we don't lose + // information when the user writes "foo\n\nbar". 
+ w.flush(true) + + return remaining +} + +func (w *PrefixFilteredWriter) Close() error { + return w.Sync() +} + +func (w *PrefixFilteredWriter) Sync() error { + // Don't allow empty messages on explicit Sync calls or on Close + // because we don't want an extraneous empty message at the end of the + // stream -- it's common for files to end with a newline. + w.flush(false) + return nil +} + +func (w *PrefixFilteredWriter) flush(allowEmpty bool) { + if allowEmpty || w.buff.Len() > 0 { + w.log(w.buff.Bytes()) + } + w.buff.Reset() +} + +// log writes the buffered line to the underlying writer, filtering in only +// the prefixed messages. It removes the configured prefix from the line. +func (w *PrefixFilteredWriter) log(b []byte) { + line := string(b) + noPrefixLine := strings.TrimPrefix(line, w.PrefixFilter) + if w.PrefixFilter == "" || noPrefixLine != line { + toWrite := []byte(noPrefixLine + "\n") + w.Writer.Write(toWrite) + } +} diff --git a/packages/orchestrator/internal/template/build/writer/postprocessor.go b/packages/orchestrator/internal/template/build/writer/postprocessor.go new file mode 100644 index 0000000..c880f38 --- /dev/null +++ b/packages/orchestrator/internal/template/build/writer/postprocessor.go @@ -0,0 +1,76 @@ +package writer + +import ( + "context" + "fmt" + "io" + "sync" + "time" +) + +const tickerInterval = 5 * time.Second + +type PostProcessor struct { + errChan chan error + ctx context.Context + writer io.Writer + ticker *time.Ticker + + stopOnce sync.Once +} + +// Start starts the post-processing. +func (p *PostProcessor) Start() { + p.WriteMsg("Starting postprocessing") + startTime := time.Now() + + for { + msg := "..." + + select { + case postprocessingErr := <-p.errChan: + p.WriteMsg(msg) + + if postprocessingErr != nil { + p.WriteMsg(fmt.Sprintf("Postprocessing failed: %s", postprocessingErr)) + return + } + p.WriteMsg(fmt.Sprintf("Postprocessing finished. Took %s. 
Cleaning up...", time.Since(startTime).Truncate(time.Second).String())) + + return + case <-p.ctx.Done(): + return + case <-p.ticker.C: + p.WriteMsg(msg) + } + } +} + +func (p *PostProcessor) Stop(err error) { + p.stopOnce.Do(func() { + p.errChan <- err + }) +} + +func (p *PostProcessor) WriteMsg(message string) { + p.ticker.Reset(tickerInterval) + p.writer.Write([]byte(prefixWithTimestamp(message + "\n"))) +} + +func (p *PostProcessor) Write(b []byte) (n int, err error) { + p.ticker.Reset(tickerInterval) + return p.writer.Write([]byte(prefixWithTimestamp(string(b)))) +} + +func NewPostProcessor(ctx context.Context, writer io.Writer) *PostProcessor { + return &PostProcessor{ + ctx: ctx, + writer: writer, + errChan: make(chan error, 1), + ticker: time.NewTicker(tickerInterval), + } +} + +func prefixWithTimestamp(message string) string { + return fmt.Sprintf("[%s] %s", time.Now().Format(time.RFC3339), message) +} diff --git a/packages/template-manager/internal/build/rootfs_test.go b/packages/orchestrator/internal/template/build/writer/postprocessor_test.go similarity index 68% rename from packages/template-manager/internal/build/rootfs_test.go rename to packages/orchestrator/internal/template/build/writer/postprocessor_test.go index 1407005..072dbff 100644 --- a/packages/template-manager/internal/build/rootfs_test.go +++ b/packages/orchestrator/internal/template/build/writer/postprocessor_test.go @@ -1,4 +1,4 @@ -package build +package writer import ( "context" @@ -6,6 +6,7 @@ import ( "strings" "sync" "testing" + "time" ) // test writer that stores the written data @@ -21,6 +22,13 @@ func (w *testWriter) Write(p []byte) (n int, err error) { return len(p), nil } +func (w *testWriter) Data() []byte { + w.mu.Lock() + defer w.mu.Unlock() + + return w.data +} + func TestPostProcessor_Start(t *testing.T) { type fields struct { testErr error @@ -41,13 +49,12 @@ func TestPostProcessor_Start(t *testing.T) { name: "test success", fields: fields{ testErr: nil, - shouldContain: 
"Postprocessing finished. ", + shouldContain: "Postprocessing finished.", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tw := &testWriter{} ctx := context.TODO() errChan := make(chan error) @@ -56,15 +63,24 @@ func TestPostProcessor_Start(t *testing.T) { ctx: ctx, writer: tw, errChan: errChan, + ticker: time.NewTicker(tickerInterval), } - go p.Start() - p.stop(tt.fields.testErr) - close(errChan) - if !strings.Contains(string(tw.data), tt.fields.shouldContain) { - t.Errorf("expected data to contain %s, got %s", tt.fields.shouldContain, string(tw.data)) - } + end := make(chan struct{}, 1) + go func() { + p.Start() + + end <- struct{}{} + }() + p.Stop(tt.fields.testErr) + // Wait for the start goroutine to finish + <-end + + logs := string(tw.Data()) + if !strings.Contains(logs, tt.fields.shouldContain) { + t.Errorf("expected data to contain %s, got %s", tt.fields.shouldContain, logs) + } }) } } diff --git a/packages/orchestrator/internal/template/build/writer/writer.go b/packages/orchestrator/internal/template/build/writer/writer.go new file mode 100644 index 0000000..7760188 --- /dev/null +++ b/packages/orchestrator/internal/template/build/writer/writer.go @@ -0,0 +1,19 @@ +package writer + +import ( + "go.uber.org/zap" +) + +type BuildLogsWriter struct { + logger *zap.Logger +} + +func (w BuildLogsWriter) Write(p []byte) (n int, err error) { + w.logger.Info(string(p)) + return len(p), nil +} + +func New(logger *zap.Logger) BuildLogsWriter { + writer := BuildLogsWriter{logger: logger} + return writer +} diff --git a/packages/orchestrator/internal/template/cache/build_cache.go b/packages/orchestrator/internal/template/cache/build_cache.go new file mode 100644 index 0000000..0150e14 --- /dev/null +++ b/packages/orchestrator/internal/template/cache/build_cache.go @@ -0,0 +1,175 @@ +package cache + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/jellydator/ttlcache/v3" + "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" + + 
template_manager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" + "github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +const ( + buildInfoExpiration = time.Minute * 10 // 10 minutes +) + +type BuildInfo struct { + status template_manager.TemplateBuildState + metadata *template_manager.TemplateBuildMetadata + mu sync.RWMutex + ctx context.Context + ctxCancel context.CancelFunc +} + +func (b *BuildInfo) IsRunning() bool { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.status == template_manager.TemplateBuildState_Building +} + +func (b *BuildInfo) IsFailed() bool { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.status == template_manager.TemplateBuildState_Failed +} + +func (b *BuildInfo) GetMetadata() *template_manager.TemplateBuildMetadata { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.metadata +} + +func (b *BuildInfo) GetStatus() template_manager.TemplateBuildState { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.status +} + +func (b *BuildInfo) GetContext() context.Context { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.ctx +} + +func (b *BuildInfo) Cancel() { + b.mu.Lock() + defer b.mu.Unlock() + + b.ctxCancel() +} + +type BuildCache struct { + cache *ttlcache.Cache[string, *BuildInfo] + + mu sync.Mutex +} + +func NewBuildCache(meterProvider metric.MeterProvider) *BuildCache { + meter := meterProvider.Meter("orchestrator.cache.build") + + cache := ttlcache.New(ttlcache.WithTTL[string, *BuildInfo](buildInfoExpiration)) + _, err := telemetry.GetObservableUpDownCounter(meter, telemetry.BuildCounterMeterName, func(ctx context.Context, observer metric.Int64Observer) error { + items := utils.MapValues(cache.Items()) + + // Filter running builds + runningCount := len(utils.Filter(items, func(item *ttlcache.Item[string, *BuildInfo]) bool { + return item != nil && item.Value() != nil && item.Value().IsRunning() + })) + + observer.Observe(int64(runningCount)) + 
return nil + }) + if err != nil { + zap.L().Error("error creating counter", zap.Error(err), zap.Any("counter_name", telemetry.BuildCounterMeterName)) + } + + go cache.Start() + + return &BuildCache{ + cache: cache, + } +} + +// Get returns the build info. +func (c *BuildCache) Get(buildID string) (*BuildInfo, error) { + item := c.cache.Get(buildID) + if item == nil { + return nil, fmt.Errorf("build %s not found in cache", buildID) + } + + value := item.Value() + if value == nil { + return nil, fmt.Errorf("build %s not found in cache", buildID) + } + + return value, nil +} + +// Create creates a new build if it doesn't exist in the cache or the build was already finished. +func (c *BuildCache) Create(buildID string) (*BuildInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + + item := c.cache.Get(buildID, ttlcache.WithDisableTouchOnHit[string, *BuildInfo]()) + if item != nil { + return nil, fmt.Errorf("build %s already exists in cache", buildID) + } + + ctx, cancel := context.WithCancel(context.Background()) + + info := &BuildInfo{ + status: template_manager.TemplateBuildState_Building, + metadata: nil, + ctx: ctx, + ctxCancel: cancel, + } + + c.cache.Set(buildID, info, buildInfoExpiration) + + return info, nil +} + +func (c *BuildCache) SetSucceeded(buildID string, metadata *template_manager.TemplateBuildMetadata) error { + c.mu.Lock() + defer c.mu.Unlock() + + item, err := c.Get(buildID) + if err != nil { + return fmt.Errorf("build %s not found in cache: %w", buildID, err) + } + + item.status = template_manager.TemplateBuildState_Completed + item.metadata = metadata + return nil +} + +func (c *BuildCache) SetFailed(buildID string) error { + c.mu.Lock() + defer c.mu.Unlock() + + item, err := c.Get(buildID) + if err != nil { + return fmt.Errorf("build %s not found in cache: %w", buildID, err) + } + + item.status = template_manager.TemplateBuildState_Failed + return nil +} + +func (c *BuildCache) Delete(buildID string) { + c.mu.Lock() + defer c.mu.Unlock() + + 
c.cache.Delete(buildID) +} diff --git a/packages/orchestrator/internal/template/constants/service.go b/packages/orchestrator/internal/template/constants/service.go new file mode 100644 index 0000000..1d07868 --- /dev/null +++ b/packages/orchestrator/internal/template/constants/service.go @@ -0,0 +1,3 @@ +package constants + +const ServiceNameTemplate = "template-manager" diff --git a/packages/orchestrator/internal/template/server/create_template.go b/packages/orchestrator/internal/template/server/create_template.go new file mode 100644 index 0000000..0a0ba62 --- /dev/null +++ b/packages/orchestrator/internal/template/server/create_template.go @@ -0,0 +1,112 @@ +package server + +import ( + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/build/writer" + templatemanager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +func (s *ServerStore) TemplateCreate(ctx context.Context, templateRequest *templatemanager.TemplateCreateRequest) (*emptypb.Empty, error) { + _, childSpan := s.tracer.Start(ctx, "template-create") + defer childSpan.End() + + config := templateRequest.Template + childSpan.SetAttributes( + telemetry.WithTemplateID(config.TemplateID), + attribute.String("env.build.id", config.BuildID), + attribute.String("env.kernel.version", config.KernelVersion), + attribute.String("env.firecracker.version", config.FirecrackerVersion), + attribute.String("env.start_cmd", config.StartCommand), + attribute.Int64("env.memory_mb", int64(config.MemoryMB)), + 
attribute.Int64("env.vcpu_count", int64(config.VCpuCount)), + attribute.Bool("env.huge_pages", config.HugePages), + ) + + if s.healthStatus == templatemanager.HealthState_Draining { + s.logger.Error("Requesting template creation while server is draining is not possible", logger.WithTemplateID(config.TemplateID)) + return nil, fmt.Errorf("server is draining") + } + + logsWriter := writer.New( + s.buildLogger. + With(zap.Field{Type: zapcore.StringType, Key: "envID", String: config.TemplateID}). + With(zap.Field{Type: zapcore.StringType, Key: "buildID", String: config.BuildID}), + ) + + template := &build.TemplateConfig{ + TemplateFiles: storage.NewTemplateFiles( + config.TemplateID, + config.BuildID, + config.KernelVersion, + config.FirecrackerVersion, + ), + VCpuCount: int64(config.VCpuCount), + MemoryMB: int64(config.MemoryMB), + StartCmd: config.StartCommand, + ReadyCmd: config.ReadyCommand, + DiskSizeMB: int64(config.DiskSizeMB), + BuildLogsWriter: logsWriter, + HugePages: config.HugePages, + } + + buildInfo, err := s.buildCache.Create(config.BuildID) + if err != nil { + return nil, fmt.Errorf("error while creating build cache: %w", err) + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + defer buildInfo.Cancel() + + buildContext, buildSpan := s.tracer.Start( + trace.ContextWithSpanContext(buildInfo.GetContext(), childSpan.SpanContext()), + "template-background-build", + ) + defer buildSpan.End() + + res, err := s.builder.Build(buildContext, template) + // Wait for the CLI to load all the logs + // This is a temporary ~fix for the CLI to load most of the logs before finishing the template build + // Ideally we should wait in the CLI for the last log message + time.Sleep(8 * time.Second) + if err != nil { + s.reportBuildFailed(buildContext, template, err) + return + } + + buildMetadata := &templatemanager.TemplateBuildMetadata{RootfsSizeKey: int32(template.RootfsSizeMB()), EnvdVersionKey: res.EnvdVersion} + err = s.buildCache.SetSucceeded(template.BuildId, 
buildMetadata) + if err != nil { + s.reportBuildFailed(buildContext, template, fmt.Errorf("error while setting build state to succeeded: %w", err)) + return + } + + telemetry.ReportEvent(buildContext, "Environment built") + }() + + return nil, nil +} + +func (s *ServerStore) reportBuildFailed(ctx context.Context, config *build.TemplateConfig, err error) { + telemetry.ReportCriticalError(ctx, "error while building template", err) + cacheErr := s.buildCache.SetFailed(config.BuildId) + if cacheErr != nil { + s.logger.Error("Error while setting build state to failed", zap.Error(err)) + } + + telemetry.ReportEvent(ctx, "Environment built failed") +} diff --git a/packages/orchestrator/internal/template/server/delete_template.go b/packages/orchestrator/internal/template/server/delete_template.go new file mode 100644 index 0000000..a7643f5 --- /dev/null +++ b/packages/orchestrator/internal/template/server/delete_template.go @@ -0,0 +1,45 @@ +package server + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/template" + templatemanager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +func (s *ServerStore) TemplateBuildDelete(ctx context.Context, in *templatemanager.TemplateBuildDeleteRequest) (*emptypb.Empty, error) { + childCtx, childSpan := s.tracer.Start(ctx, "template-delete-request", trace.WithAttributes( + telemetry.WithTemplateID(in.TemplateID), + telemetry.WithBuildID(in.BuildID), + )) + defer childSpan.End() + + s.wg.Add(1) + defer s.wg.Done() + + if in.TemplateID == "" || in.BuildID == "" { + return nil, errors.New("template id and build id are required fields") + } + + c, err := s.buildCache.Get(in.BuildID) + if err == nil { + // Only handle if the build is in the 
cache + zap.L().Info("Canceling running template build", logger.WithTemplateID(in.TemplateID), logger.WithBuildID(in.BuildID)) + telemetry.ReportEvent(ctx, "cancel in progress template build") + c.Cancel() + } + + err = template.Delete(childCtx, s.tracer, s.artifactsregistry, s.templateStorage, in.TemplateID, in.BuildID) + if err != nil { + return nil, err + } + + return nil, nil +} diff --git a/packages/orchestrator/internal/template/server/health_status.go b/packages/orchestrator/internal/template/server/health_status.go new file mode 100644 index 0000000..a9656e1 --- /dev/null +++ b/packages/orchestrator/internal/template/server/health_status.go @@ -0,0 +1,18 @@ +package server + +import ( + "context" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" +) + +func (s *ServerStore) HealthStatus(ctx context.Context, req *emptypb.Empty) (*template_manager.HealthStatusResponse, error) { + _, ctxSpan := s.tracer.Start(ctx, "health-status-request") + defer ctxSpan.End() + + return &template_manager.HealthStatusResponse{ + Status: s.healthStatus, + }, nil +} diff --git a/packages/orchestrator/internal/template/server/main.go b/packages/orchestrator/internal/template/server/main.go new file mode 100644 index 0000000..b5f141b --- /dev/null +++ b/packages/orchestrator/internal/template/server/main.go @@ -0,0 +1,122 @@ +package server + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/grpcserver" + "github.com/e2b-dev/infra/packages/orchestrator/internal/proxy" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" + 
"github.com/e2b-dev/infra/packages/orchestrator/internal/template/build" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/cache" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/template" + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" + "github.com/e2b-dev/infra/packages/shared/pkg/env" + templatemanager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" + "github.com/e2b-dev/infra/packages/shared/pkg/storage" +) + +type ServerStore struct { + templatemanager.UnimplementedTemplateServiceServer + tracer trace.Tracer + logger *zap.Logger + builder *build.TemplateBuilder + buildCache *cache.BuildCache + buildLogger *zap.Logger + templateStorage *template.Storage + artifactsregistry artifactsregistry.ArtifactsRegistry + healthStatus templatemanager.HealthState + wg *sync.WaitGroup // wait group for running builds +} + +func New( + ctx context.Context, + tracer trace.Tracer, + meterProvider metric.MeterProvider, + logger *zap.Logger, + buildLogger *zap.Logger, + grpc *grpcserver.GRPCServer, + networkPool *network.Pool, + devicePool *nbd.DevicePool, + proxy *proxy.SandboxProxy, + sandboxes *smap.Map[*sandbox.Sandbox], +) (*ServerStore, error) { + logger.Info("Initializing template manager") + + persistence, err := storage.GetTemplateStorageProvider(ctx) + if err != nil { + return nil, fmt.Errorf("error getting template storage provider: %v", err) + } + + artifactsregistry, err := artifactsregistry.GetArtifactsRegistryProvider() + if err != nil { + return nil, fmt.Errorf("error getting artifacts registry provider: %v", err) + } + + templateStorage := template.NewStorage(persistence) + buildCache := cache.NewBuildCache(meterProvider) + builder := build.NewBuilder( + logger, + buildLogger, + tracer, + templateStorage, + persistence, + artifactsregistry, + devicePool, + networkPool, + proxy, + sandboxes, + ) + + store := 
&ServerStore{ + tracer: tracer, + logger: logger, + builder: builder, + buildCache: buildCache, + buildLogger: buildLogger, + artifactsregistry: artifactsregistry, + templateStorage: templateStorage, + healthStatus: templatemanager.HealthState_Healthy, + wg: &sync.WaitGroup{}, + } + + templatemanager.RegisterTemplateServiceServer(grpc.GRPCServer(), store) + + return store, nil +} + +func (s *ServerStore) Close(ctx context.Context) error { + select { + case <-ctx.Done(): + return errors.New("context canceled during server graceful shutdown") + default: + // no new jobs should be started + s.logger.Info("marking service as draining") + s.healthStatus = templatemanager.HealthState_Draining + // wait for registering the node as draining + if !env.IsLocal() { + time.Sleep(5 * time.Second) + } + + // wait for all builds to finish + s.logger.Info("waiting for all jobs to finish") + s.wg.Wait() + + if !env.IsLocal() { + // give some time so all connected services can check build status + s.logger.Info("waiting before shutting down server") + time.Sleep(15 * time.Second) + } + return nil + } +} diff --git a/packages/orchestrator/internal/template/server/template_status.go b/packages/orchestrator/internal/template/server/template_status.go new file mode 100644 index 0000000..782fb6a --- /dev/null +++ b/packages/orchestrator/internal/template/server/template_status.go @@ -0,0 +1,24 @@ +package server + +import ( + "context" + + "github.com/pkg/errors" + + template_manager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" +) + +func (s *ServerStore) TemplateBuildStatus(ctx context.Context, in *template_manager.TemplateStatusRequest) (*template_manager.TemplateBuildStatusResponse, error) { + _, ctxSpan := s.tracer.Start(ctx, "template-build-status-request") + defer ctxSpan.End() + + buildInfo, err := s.buildCache.Get(in.BuildID) + if err != nil { + return nil, errors.Wrap(err, "error while getting build info, maybe already expired") + } + + return 
&template_manager.TemplateBuildStatusResponse{ + Status: buildInfo.GetStatus(), + Metadata: buildInfo.GetMetadata(), + }, nil +} diff --git a/packages/orchestrator/internal/template/template/main.go b/packages/orchestrator/internal/template/template/main.go new file mode 100644 index 0000000..8702cd4 --- /dev/null +++ b/packages/orchestrator/internal/template/template/main.go @@ -0,0 +1,35 @@ +package template + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/trace" + + artifactsregistry "github.com/e2b-dev/infra/packages/shared/pkg/artifacts-registry" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +func Delete(ctx context.Context, tracer trace.Tracer, artifactRegistry artifactsregistry.ArtifactsRegistry, templateStorage *Storage, templateId string, buildId string) error { + childCtx, childSpan := tracer.Start(ctx, "delete-template") + defer childSpan.End() + + err := templateStorage.Remove(ctx, buildId) + if err != nil { + return fmt.Errorf("error when deleting template objects: %w", err) + } + + err = artifactRegistry.Delete(childCtx, templateId, buildId) + if err != nil { + // snapshot build are not stored in docker repository + if errors.Is(err, artifactsregistry.ErrImageNotExists) { + return nil + } + + telemetry.ReportEvent(childCtx, err.Error()) + return err + } + + return nil +} diff --git a/packages/orchestrator/internal/template/template/storage.go b/packages/orchestrator/internal/template/template/storage.go new file mode 100644 index 0000000..e64360e --- /dev/null +++ b/packages/orchestrator/internal/template/template/storage.go @@ -0,0 +1,31 @@ +package template + +import ( + "context" + "fmt" + + "github.com/e2b-dev/infra/packages/shared/pkg/storage" +) + +type Storage struct { + persistence storage.StorageProvider +} + +func NewStorage(persistence storage.StorageProvider) *Storage { + return &Storage{ + persistence: persistence, + } +} + +func (t *Storage) Remove(ctx context.Context, buildId string) error { + 
err := t.persistence.DeleteObjectsWithPrefix(ctx, buildId) + if err != nil { + return fmt.Errorf("error when removing template '%s': %w", buildId, err) + } + + return nil +} + +func (t *Storage) NewBuild(files *storage.TemplateFiles, persistence storage.StorageProvider) *storage.TemplateBuild { + return storage.NewTemplateBuild(nil, nil, persistence, files) +} diff --git a/packages/orchestrator/main.go b/packages/orchestrator/main.go index a8e6fbc..f391787 100644 --- a/packages/orchestrator/main.go +++ b/packages/orchestrator/main.go @@ -6,140 +6,369 @@ import ( "flag" "fmt" "log" + "math" + "net/http" "os" "os/signal" - "sync" - "sync/atomic" + "slices" "syscall" + "time" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + + "github.com/e2b-dev/infra/packages/orchestrator/internal/grpcserver" + "github.com/e2b-dev/infra/packages/orchestrator/internal/metrics" + "github.com/e2b-dev/infra/packages/orchestrator/internal/proxy" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/nbd" + "github.com/e2b-dev/infra/packages/orchestrator/internal/sandbox/network" "github.com/e2b-dev/infra/packages/orchestrator/internal/server" + "github.com/e2b-dev/infra/packages/orchestrator/internal/service" + "github.com/e2b-dev/infra/packages/orchestrator/internal/template/constants" + tmplserver "github.com/e2b-dev/infra/packages/orchestrator/internal/template/server" "github.com/e2b-dev/infra/packages/shared/pkg/env" + featureflags "github.com/e2b-dev/infra/packages/shared/pkg/feature-flags" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + sbxlogger "github.com/e2b-dev/infra/packages/shared/pkg/logger/sandbox" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" ) -const defaultPort = 5008 +type Closeable interface { + Close(context.Context) error +} -var commitSHA string +const ( + defaultPort = 5008 + 
defaultProxyPort = 5007 -func main() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + sandboxMetricExportPeriod = 5 * time.Second - sig, sigCancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM) - defer sigCancel() + version = "0.1.0" - var port uint + fileLockName = "/orchestrator.lock" +) - flag.UintVar(&port, "port", defaultPort, "orchestrator server port") +var ( + forceStop = env.GetEnv("FORCE_STOP", "false") == "true" + commitSHA string +) + +func main() { + port := flag.Uint("port", defaultPort, "orchestrator server port") + proxyPort := flag.Uint("proxy-port", defaultProxyPort, "orchestrator proxy port") flag.Parse() - wg := &sync.WaitGroup{} - exitCode := &atomic.Int32{} - telemetrySignal := make(chan struct{}) + if *port > math.MaxUint16 { + log.Fatalf("%d is larger than maximum possible port %d", *port, math.MaxUint16) + } - // defer waiting on the waitgroup so that this runs even when - // there's a panic. - defer wg.Wait() - - if !env.IsLocal() { - shutdown := telemetry.InitOTLPExporter(ctx, server.ServiceName, "no") - wg.Add(1) - go func() { - defer wg.Done() - <-telemetrySignal - if err := shutdown(ctx); err != nil { - log.Printf("telemetry shutdown: %v", err) - exitCode.Add(1) + if *proxyPort > math.MaxUint16 { + log.Fatalf("%d is larger than maximum possible proxy port %d", *proxyPort, math.MaxUint16) + } + + success := run(*port, *proxyPort) + + log.Println("Stopping orchestrator, success:", success) + + if success == false { + os.Exit(1) + } +} + +func run(port, proxyPort uint) (success bool) { + success = true + + services := service.GetServices() + + // Check if the orchestrator crashed and restarted + // Skip this check in development mode + // We don't want to lock if the service is running with force stop; the subsequent start would fail. 
+ if !env.IsDevelopment() && !forceStop { + info, err := os.Stat(fileLockName) + if err == nil { + log.Fatalf("Orchestrator was already started at %s, exiting", info.ModTime()) + } + + f, err := os.Create(fileLockName) + if err != nil { + log.Fatalf("Failed to create lock file %s: %v", fileLockName, err) + } + defer func() { + fileErr := f.Close() + if fileErr != nil { + log.Printf("Failed to close lock file %s: %v", fileLockName, fileErr) + } + + // Remove the lock file on graceful shutdown + if success == true { + if fileErr = os.Remove(fileLockName); fileErr != nil { + log.Printf("Failed to remove lock file %s: %v", fileLockName, fileErr) + } } }() } - log.Println("Starting orchestrator", "commit", commitSHA) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sig, sigCancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM) + defer sigCancel() + + clientID := service.GetClientID() + if clientID == "" { + zap.L().Fatal("client ID is empty") + } - // Check if AWS is enabled and setup AWS configuration - if os.Getenv("AWS_ENABLED") == "true" { - log.Println("AWS is enabled, setting up AWS configuration") + serviceName := service.GetServiceName(services) + serviceError := make(chan error) + defer close(serviceError) - // AWS region must be set for the AWS SDK to work correctly - if os.Getenv("AWS_REGION") == "" { - log.Println("AWS_REGION is not set, defaulting to us-east-1") - os.Setenv("AWS_REGION", "us-east-1") + var g errgroup.Group + // defer waiting on the group so that this runs even when + // there's a panic. 
+ defer func(g *errgroup.Group) { + err := g.Wait() + if err != nil { + log.Printf("error while shutting down: %v", err) + success = false } + }(&g) - // Verify S3 bucket name is set - if os.Getenv("TEMPLATE_AWS_BUCKET_NAME") == "" { - log.Fatalf("TEMPLATE_AWS_BUCKET_NAME must be set when AWS is enabled") + // Setup telemetry + var tel *telemetry.Client + if env.IsLocal() { + tel = telemetry.NewNoopClient() + } else { + var err error + tel, err = telemetry.New(ctx, serviceName, commitSHA, clientID) + if err != nil { + zap.L().Fatal("failed to create metrics exporter", zap.Error(err)) } + } + defer func() { + err := tel.Shutdown(ctx) + if err != nil { + log.Printf("error while shutting down metrics provider: %v", err) + success = false + } + }() - // Check AWS credentials (but don't fail if they're not set, as they might be provided through IAM roles) - if os.Getenv("AWS_ACCESS_KEY_ID") == "" || os.Getenv("AWS_SECRET_ACCESS_KEY") == "" { - log.Println("AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY not set. 
If not using IAM roles, authentication may fail.") + globalLogger := zap.Must(logger.NewLogger(ctx, logger.LoggerConfig{ + ServiceName: serviceName, + IsInternal: true, + IsDebug: env.IsDebug(), + Cores: []zapcore.Core{logger.GetOTELCore(tel.LogsProvider, serviceName)}, + EnableConsole: true, + })) + defer func(l *zap.Logger) { + err := l.Sync() + if err != nil { + log.Printf("error while shutting down logger: %v", err) + success = false } + }(globalLogger) + zap.ReplaceGlobals(globalLogger) - // Check if using temporary credentials - if os.Getenv("AWS_SESSION_TOKEN") != "" { - log.Println("Using temporary AWS credentials with session token") + sbxLoggerExternal := sbxlogger.NewLogger( + ctx, + tel.LogsProvider, + sbxlogger.SandboxLoggerConfig{ + ServiceName: serviceName, + IsInternal: false, + CollectorAddress: os.Getenv("LOGS_COLLECTOR_ADDRESS"), + }, + ) + defer func(l *zap.Logger) { + err := l.Sync() + if err != nil { + log.Printf("error while shutting down sandbox logger: %v", err) + success = false } + }(sbxLoggerExternal) + sbxlogger.SetSandboxLoggerExternal(sbxLoggerExternal) - log.Printf("AWS configuration: Region=%s, Bucket=%s", - os.Getenv("AWS_REGION"), - os.Getenv("TEMPLATE_AWS_BUCKET_NAME")) + sbxLoggerInternal := sbxlogger.NewLogger( + ctx, + tel.LogsProvider, + sbxlogger.SandboxLoggerConfig{ + ServiceName: serviceName, + IsInternal: true, + CollectorAddress: os.Getenv("LOGS_COLLECTOR_ADDRESS"), + }, + ) + defer func(l *zap.Logger) { + err := l.Sync() + if err != nil { + log.Printf("error while shutting down sandbox logger: %v", err) + success = false + } + }(sbxLoggerInternal) + sbxlogger.SetSandboxLoggerInternal(sbxLoggerInternal) + + log.Println("Starting orchestrator", "commit", commitSHA) + + // The sandbox map is shared between the server and the proxy + // to propagate information about sandbox routing. 
+ sandboxes := smap.New[*sandbox.Sandbox]() + + sandboxProxy, err := proxy.NewSandboxProxy(tel.MeterProvider, proxyPort, sandboxes) + if err != nil { + zap.L().Fatal("failed to create sandbox proxy", zap.Error(err)) } - srv, err := server.New(ctx, port) + tracer := tel.TracerProvider.Tracer(serviceName) + + networkPool, err := network.NewPool(ctx, tel.MeterProvider, network.NewSlotsPoolSize, network.ReusedSlotsPoolSize, clientID, tracer) if err != nil { - log.Fatalf("failed to create server: %v", err) + zap.L().Fatal("failed to create network pool", zap.Error(err)) } - log.Println("Finised new server...") + devicePool, err := nbd.NewDevicePool(ctx, tel.MeterProvider) + if err != nil { + zap.L().Fatal("failed to create device pool", zap.Error(err)) + } - wg.Add(1) - go func() { - defer wg.Done() - var err error + serviceInfo := service.NewInfoContainer(clientID, version, commitSHA) - defer func() { - // recover the panic because the service manages a number of go routines - // that can panic, so catching this here allows for the rest of the process - // to terminate in a more orderly manner. - if perr := recover(); perr != nil { - // many of the panics use log.Panicf which means we're going to log - // some panic messages twice, but this seems ok, and temporary while - // we clean up logging. 
- log.Printf("caught panic in service: %v", perr) - exitCode.Add(1) - err = errors.Join(err, fmt.Errorf("server panic: %v", perr)) - } + grpcSrv := grpcserver.New(tel.TracerProvider, tel.MeterProvider, serviceInfo) + + featureFlags, err := featureflags.NewClient() + if err != nil { + zap.L().Fatal("failed to create feature flags client", zap.Error(err)) + } + + sandboxObserver, err := metrics.NewSandboxObserver(ctx, serviceInfo.SourceCommit, serviceInfo.ClientId, sandboxMetricExportPeriod, sandboxes) + if err != nil { + zap.L().Fatal("failed to create sandbox observer", zap.Error(err)) + } - // if we encountered an err, but the signal context was NOT canceled, then - // the outer context needs to be canceled so the remainder of the service - // can shutdown. - if err != nil && sig.Err() == nil { - log.Printf("service ended early without signal") - cancel() + _, err = server.New(ctx, grpcSrv, tel, networkPool, devicePool, tracer, serviceInfo, sandboxProxy, sandboxes, featureFlags) + if err != nil { + zap.L().Fatal("failed to create server", zap.Error(err)) + } + + tmplSbxLoggerExternal := sbxlogger.NewLogger( + ctx, + tel.LogsProvider, + sbxlogger.SandboxLoggerConfig{ + ServiceName: constants.ServiceNameTemplate, + IsInternal: false, + CollectorAddress: os.Getenv("LOGS_COLLECTOR_ADDRESS"), + }, + ) + defer func(l *zap.Logger) { + err := l.Sync() + if err != nil { + log.Printf("error while shutting down template manager sandbox logger: %v", err) + success = false + } + }(tmplSbxLoggerExternal) + + var closers []Closeable + closers = append(closers, + grpcSrv, + networkPool, + devicePool, + sandboxProxy, + featureFlags, + sandboxObserver, + ) + + // Initialize the template manager only if the service is enabled + if slices.Contains(services, service.TemplateManager) { + tmpl, err := tmplserver.New( + ctx, + tracer, + tel.MeterProvider, + globalLogger, + tmplSbxLoggerExternal, + grpcSrv, + networkPool, + devicePool, + sandboxProxy, + sandboxes, + ) + if err != nil { + 
zap.L().Fatal("failed to create template manager", zap.Error(err)) + } + + // Prepend to make sure it's awaited on graceful shutdown + closers = append([]Closeable{tmpl}, closers...) + } + + service.NewInfoService(ctx, grpcSrv.GRPCServer(), serviceInfo, sandboxes) + + g.Go(func() error { + zap.L().Info("Starting session proxy") + proxyErr := sandboxProxy.Start() + if proxyErr != nil && !errors.Is(proxyErr, http.ErrServerClosed) { + proxyErr = fmt.Errorf("proxy server: %w", proxyErr) + zap.L().Error("error starting proxy server", zap.Error(proxyErr)) + + select { + case serviceError <- proxyErr: + default: + // Don't block if the serviceError channel is already closed + // or if the error is already sent } - }() + return proxyErr + } + + return nil + }) + + g.Go(func() (err error) { // this sets the error declared above so the function // in the defer can check it. - if err = srv.Start(ctx); err != nil { - log.Printf("orchestrator service: %v", err) - exitCode.Add(1) + grpcErr := grpcSrv.Start(ctx, port) + if grpcErr != nil { + grpcErr = fmt.Errorf("grpc server: %w", grpcErr) + zap.L().Error("grpc server error", zap.Error(grpcErr)) + + select { + case serviceError <- grpcErr: + default: + // Don't block if the serviceError channel is already closed + // or if the error is already sent + } + + return grpcErr } - }() - wg.Add(1) - go func() { - defer wg.Done() - defer close(telemetrySignal) - <-sig.Done() - if err := srv.Close(ctx); err != nil { - log.Printf("grpc service: %v", err) - exitCode.Add(1) + return nil + }) + + // Wait for the shutdown signal or if some service fails + select { + case <-sig.Done(): + zap.L().Info("Shutdown signal received") + case serviceErr := <-serviceError: + zap.L().Error("Service error", zap.Error(serviceErr)) + } + + closeCtx, cancelCloseCtx := context.WithCancel(context.Background()) + defer cancelCloseCtx() + if forceStop { + cancelCloseCtx() + } + + for _, c := range closers { + zap.L().Info(fmt.Sprintf("Closing %T, forced: %v", c, 
forceStop)) + if err := c.Close(closeCtx); err != nil { + zap.L().Error("error during shutdown", zap.Error(err)) + success = false } - }() + } - wg.Wait() + zap.L().Info("Waiting for services to finish") + if err := g.Wait(); err != nil { + zap.L().Error("service group error", zap.Error(err)) + success = false + } - os.Exit(int(exitCode.Load())) + return success } diff --git a/packages/orchestrator/orchestrator.proto b/packages/orchestrator/orchestrator.proto index b84dacf..bded4e5 100644 --- a/packages/orchestrator/orchestrator.proto +++ b/packages/orchestrator/orchestrator.proto @@ -36,6 +36,9 @@ message SandboxConfig { string base_template_id = 17; optional bool auto_pause = 18; + + optional string envd_access_token = 19; + string execution_id = 20; } message SandboxCreateRequest { @@ -86,8 +89,6 @@ message SandboxListCachedBuildsResponse { repeated CachedBuildInfo builds = 1; } - - service SandboxService { rpc Create(SandboxCreateRequest) returns (SandboxCreateResponse); rpc Update(SandboxUpdateRequest) returns (google.protobuf.Empty); diff --git a/packages/orchestrator/template-manager.proto b/packages/orchestrator/template-manager.proto new file mode 100644 index 0000000..9e37982 --- /dev/null +++ b/packages/orchestrator/template-manager.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; + +option go_package = "github.com/e2b-dev/infra/template-manager"; + + +message TemplateConfig { + string templateID = 1; + string buildID = 2; + + int32 memoryMB = 3; + int32 vCpuCount = 4; + int32 diskSizeMB = 5; + + string kernelVersion = 6; + string firecrackerVersion = 7; + string startCommand = 8; + bool hugePages = 9; + + string readyCommand = 10; +} + +message TemplateCreateRequest { + TemplateConfig template = 1; +} + +message TemplateStatusRequest { + string templateID = 1; + string buildID = 2; +} + +// Data required for deleting a template.
+message TemplateBuildDeleteRequest { + string buildID = 1; + string templateID = 2; +} + +message TemplateBuildMetadata { + int32 rootfsSizeKey = 1; + string envdVersionKey = 2; +} + +enum TemplateBuildState { + Building = 0; + Failed = 1; + Completed = 2; +} + +// Logs from template build +message TemplateBuildStatusResponse { + TemplateBuildState status = 1; + TemplateBuildMetadata metadata = 2; +} + +enum HealthState { + Healthy = 0; + Draining = 1; +} + +message HealthStatusResponse { + HealthState status = 1; +} + +// Interface exported by the server. +service TemplateService { + // TemplateCreate is a gRPC service that creates a new template + rpc TemplateCreate (TemplateCreateRequest) returns (google.protobuf.Empty); + + // TemplateStatus is a gRPC service that streams the status of a template build + rpc TemplateBuildStatus (TemplateStatusRequest) returns (TemplateBuildStatusResponse); + + // TemplateBuildDelete is a gRPC service that deletes files associated with a template build + rpc TemplateBuildDelete (TemplateBuildDeleteRequest) returns (google.protobuf.Empty); + + // todo (2025-05): this is deprecated, please use InfoService that is used for both orchestrator and template manager + rpc HealthStatus (google.protobuf.Empty) returns (HealthStatusResponse); +} diff --git a/packages/template-manager/upload-envs.sh b/packages/orchestrator/upload-envs.sh similarity index 85% rename from packages/template-manager/upload-envs.sh rename to packages/orchestrator/upload-envs.sh index 6ecece7..d008023 100644 --- a/packages/template-manager/upload-envs.sh +++ b/packages/orchestrator/upload-envs.sh @@ -3,7 +3,7 @@ set -euo pipefail # ------------------------------------------------------------------------------------------------- -# Upload envs from disk to AWS S3 +# Upload envs from disk to GCS # ------------------------------------------------------------------------------------------------- # First argument is target dir name TARGET_DIR_NAME=$1 @@ -15,9 +15,9 
@@ TEMPLATE_BUCKET_NAME=$2 TEMPLATE_ID=$3 # ------------------------------------------------------------------------------------------------- -echo "Uploading envs from ${TARGET_DIR_NAME} to S3 bucket ${TEMPLATE_BUCKET_NAME}" +echo "Uploading envs from ${TARGET_DIR_NAME} to GCS" -COMMAND="aws s3 cp --quiet --cache-control no-cache,max-age=0" +COMMAND="gcloud storage cp --verbosity error -n" # Initialize counter for uploaded envs uploaded_env_count=0 @@ -46,7 +46,7 @@ for template_id in $(ls ${TARGET_DIR_NAME}); do BUILD_ID=$(cat ${TARGET_DIR_NAME}/${template_id}/build_id) echo "Build ID: ${BUILD_ID}" - # Upload env to S3 with aws s3 cp, copy only "memfile", "rootfs.ext4" and "snapfile" from the dir + # Upload env to GCS via gcloud storage cp, copy only "memfile", "rootfs.ext4" and "snapfile" from the dir # First get and print the paths to the files MEMFILE_PATH=$(ls ${TARGET_DIR_NAME}/${template_id}/memfile) ROOTFS_EXT4_PATH=$(ls ${TARGET_DIR_NAME}/${template_id}/rootfs.ext4) @@ -68,9 +68,9 @@ for template_id in $(ls ${TARGET_DIR_NAME}); do continue fi - BUCKET_MEMFILE_PATH="s3://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/memfile" - BUCKET_ROOTFS_EXT4_PATH="s3://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/rootfs.ext4" - BUCKET_SNAPFILE_PATH="s3://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/snapfile" + BUCKET_MEMFILE_PATH="gs://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/memfile" + BUCKET_ROOTFS_EXT4_PATH="gs://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/rootfs.ext4" + BUCKET_SNAPFILE_PATH="gs://${TEMPLATE_BUCKET_NAME}/${BUILD_ID}/snapfile" # Upload the files echo "Uploading memfile" diff --git a/packages/orchestrator/upload-orchestrator.sh b/packages/orchestrator/upload-orchestrator.sh new file mode 100644 index 0000000..3568765 --- /dev/null +++ b/packages/orchestrator/upload-orchestrator.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -euo pipefail + +GCP_PROJECT_ID=$1 + +chmod +x bin/orchestrator + +# Orchestrator binary (same binary, different path) +gsutil -h "Cache-Control:no-cache, max-age=0" \ + cp 
bin/orchestrator "gs://${GCP_PROJECT_ID}-fc-env-pipeline/orchestrator" diff --git a/packages/orchestrator/upload-template-manager.sh b/packages/orchestrator/upload-template-manager.sh new file mode 100644 index 0000000..156c9e4 --- /dev/null +++ b/packages/orchestrator/upload-template-manager.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -euo pipefail + +GCP_PROJECT_ID=$1 + +chmod +x bin/orchestrator + +# Template Manager binary (same binary, different path) +gsutil -h "Cache-Control:no-cache, max-age=0" \ + cp bin/orchestrator "gs://${GCP_PROJECT_ID}-fc-env-pipeline/template-manager" diff --git a/packages/orchestrator/upload.sh b/packages/orchestrator/upload.sh deleted file mode 100644 index 59c2e93..0000000 --- a/packages/orchestrator/upload.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -AWS_BUCKET_NAME=$1 -AWS_REGION=${2:-us-east-1} - -chmod +x bin/orchestrator - -aws s3 cp bin/orchestrator "s3://${AWS_BUCKET_NAME}/orchestrator" \ - --region ${AWS_REGION} \ - --cache-control "no-cache, max-age=0" diff --git a/packages/shared/Makefile b/packages/shared/Makefile index 25712e4..7f9d1f1 100644 --- a/packages/shared/Makefile +++ b/packages/shared/Makefile @@ -7,72 +7,39 @@ generate-fc: .PHONY: generate-models generate-models: - rm -rf pkg/models/* - go generate ./pkg/generate_models.go + @echo "Generating models..." + @find pkg/models/* -not -path "pkg/models/clickhouse*" -delete + @go generate ./pkg/generate_models.go + @echo "Done" + +.PHONY: build-base-template +build-base-template: + @echo "Building base template..." + @E2B_DOMAIN=$(DOMAIN_NAME) DOCKER_CLI_DEBUG=1 e2b tpl build -p scripts + @echo "Done" .PHONY: prep-cluster prep-cluster: - @echo "Setting up AWS configuration..." 
- @mkdir -p ~/.e2b - @echo '{"email":"admin@example.com","teamId":"00000000-0000-0000-0000-000000000000","accessToken":"e2b_access_token","teamApiKey":"e2b_team_api_key","cloud":"aws","provider":"aws","region":"us-east-1"}' > ~/.e2b/config.json @echo "Seeding database..." - @export AWS_ENABLED=true - @export E2B_CLOUD=aws - @export E2B_REGION=us-east-1 - @export DB_HOST=database-e2b.cluster-cqjwsuyea136.us-east-1.rds.amazonaws.com - @if [ -z "$(POSTGRES_CONNECTION_STRING)" ]; then \ - export POSTGRES_CONNECTION_STRING="postgresql://postgres:xxxxx1.rds.amazonaws.com:5432/postgres"; \ - fi - @echo $(POSTGRES_CONNECTION_STRING) - @E2B_CLOUD=aws AWS_ENABLED=true E2B_REGION=us-east-1 POSTGRES_CONNECTION_STRING=$(POSTGRES_CONNECTION_STRING) DB_HOST=database-e2b.cluster-cqjwsuyea136.us-east-1.rds.amazonaws.com DB_USER=postgres DB_PASSWORD=xxxx DB_NAME=postgres DB_PORT=5432 ./scripts/aws-seed.sh - @echo "Building base template for AWS..." - @E2B_DOMAIN=$(DOMAIN_NAME) E2B_CLOUD=aws E2B_REGION=us-east-1 e2b tpl build -p scripts + @POSTGRES_CONNECTION_STRING=$(POSTGRES_CONNECTION_STRING) go run ./scripts/seed/postgres/seed-db.go + $(MAKE) build-base-template + +.PHONY: migrate-postgres +migrate-postgres:migrate-postgres/up +migrate-postgres:migrate-postgres/down +migrate-postgres/%: + @echo "Applying Postgres migration *$(notdir $@)*" + @POSTGRES_CONNECTION_STRING=$(POSTGRES_CONNECTION_STRING) go run ./scripts/migrate/postgres/main.go -direction $(notdir $@) @echo "Done" -.PHONY: migrate -migrate: - @echo "Generating migrations..." 
- @cat ./migrations/* > ./migration.sql - @echo "Applying migration" - @if [ -z "$(POSTGRES_CONNECTION_STRING)" ]; then \ - echo "Error: POSTGRES_CONNECTION_STRING is not set"; \ - exit 1; \ - fi - @echo "Using direct PSQL approach instead of Go script to avoid dependency issues" - @echo "Using connection string: $(POSTGRES_CONNECTION_STRING)" - @cat ./migration.sql | PGPASSWORD=$(shell echo "$(POSTGRES_CONNECTION_STRING)" | sed -n 's/.*password=\([^@]*\).*/\1/p') /usr/bin/psql -h $(shell echo "$(POSTGRES_CONNECTION_STRING)" | sed -n 's/.*@\([^:]*\).*/\1/p') -U postgres -d postgres - @rm ./migration.sql +.PHONY: migrate-clickhouse +migrate-clickhouse:migrate-clickhouse/up +migrate-clickhouse:migrate-clickhouse/down +migrate-clickhouse/%: + @echo "Applying ClickHouse migration *$(notdir $@)*" + @CLICKHOUSE_CONNECTION_STRING=$(CLICKHOUSE_CONNECTION_STRING) CLICKHOUSE_USERNAME=$(CLICKHOUSE_USERNAME) CLICKHOUSE_PASSWORD=$(CLICKHOUSE_PASSWORD) CLICKHOUSE_DATABASE=$(CLICKHOUSE_DATABASE) go run ./scripts/migrate/clickhouse/main.go -direction $(notdir $@) @echo "Done" .PHONY: test test: go test -v ./pkg/... - -.PHONY: prep-aws -prep-aws: - @echo "Creating AWS configuration..." 
- @mkdir -p ~/.e2b - @echo '{"email":"admin@example.com","teamId":"00000000-0000-0000-0000-000000000000","accessToken":"e2b_access_token","teamApiKey":"e2b_team_api_key","cloud":"aws","provider":"aws","region":"us-east-1"}' > ~/.e2b/config.json - @echo "Created AWS config file" - - @echo "Setting AWS environment variables" - @export AWS_ENABLED=true - @export E2B_CLOUD=aws - @export E2B_REGION=us-east-1 - @export POSTGRES_CONNECTION_STRING="xxxxx:5432/xxx" - @export DB_HOST=xxxxxx - @export DB_USER=postgres - @export DB_PASSWORD=xxxxxx - @export DB_NAME=postgres - @export DB_PORT=5432 - - @echo "Running migrations first" - @make migrate - - @echo "Seeding database for AWS" - @E2B_CLOUD=aws AWS_ENABLED=true ./scripts/aws-seed.sh - - @echo "Building base template for AWS" - @E2B_CLOUD=aws E2B_REGION=us-east-1 e2b tpl build -p scripts || echo "Template build skipped - please run manually with AWS credentials" - - @echo "AWS configuration complete" diff --git a/packages/shared/go.mod b/packages/shared/go.mod index d4ac5c3..b9803a1 100644 --- a/packages/shared/go.mod +++ b/packages/shared/go.mod @@ -1,133 +1,207 @@ module github.com/e2b-dev/infra/packages/shared -go 1.24.0 - -toolchain go1.24.2 +go 1.24.3 require ( - cloud.google.com/go/storage v1.38.0 + cloud.google.com/go/artifactregistry v1.16.0 + cloud.google.com/go/storage v1.50.0 + connectrpc.com/connect v1.18.1 entgo.io/ent v0.12.5 + github.com/ClickHouse/clickhouse-go/v2 v2.33.1 github.com/aws/aws-sdk-go-v2 v1.36.3 - github.com/aws/aws-sdk-go-v2/config v1.29.9 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 - github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 - github.com/bits-and-blooms/bitset v1.17.0 - github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.14 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74 + github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 + github.com/bits-and-blooms/bitset v1.22.0 
github.com/dchest/uniuri v1.2.0 - github.com/gin-gonic/gin v1.7.7 - github.com/go-openapi/errors v0.20.4 - github.com/go-openapi/runtime v0.26.0 - github.com/go-openapi/strfmt v0.21.7 - github.com/go-openapi/swag v0.22.4 - github.com/go-openapi/validate v0.22.1 + github.com/getkin/kin-openapi v0.132.0 + github.com/gin-gonic/gin v1.10.1 + github.com/go-openapi/errors v0.22.0 + github.com/go-openapi/runtime v0.28.0 + github.com/go-openapi/strfmt v0.23.0 + github.com/go-openapi/swag v0.23.0 + github.com/go-openapi/validate v0.24.0 + github.com/golang-migrate/migrate/v4 v4.18.2 + github.com/google/go-containerregistry v0.20.5 github.com/google/uuid v1.6.0 - github.com/googleapis/gax-go/v2 v2.12.1 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 + github.com/googleapis/gax-go/v2 v2.14.0 + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 + github.com/launchdarkly/go-sdk-common/v3 v3.1.0 + github.com/launchdarkly/go-server-sdk/v7 v7.10.0 github.com/lib/pq v1.10.9 + github.com/oapi-codegen/runtime v1.1.1 github.com/orcaman/concurrent-map/v2 v2.0.1 - github.com/rs/zerolog v1.33.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.57.0 - go.opentelemetry.io/otel v1.32.0 // Compatible with otelgin v0.46.1 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 - go.opentelemetry.io/otel/metric v1.32.0 - go.opentelemetry.io/otel/sdk v1.32.0 - go.opentelemetry.io/otel/sdk/metric v1.19.0 - go.opentelemetry.io/otel/trace v1.32.0 - go.uber.org/zap v1.18.1 + go.opentelemetry.io/contrib/bridges/otelzap v0.9.0 + go.opentelemetry.io/otel v1.36.0 + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 + go.opentelemetry.io/otel/log v0.10.0 + 
go.opentelemetry.io/otel/metric v1.36.0 + go.opentelemetry.io/otel/sdk v1.36.0 + go.opentelemetry.io/otel/sdk/log v0.10.0 + go.opentelemetry.io/otel/sdk/metric v1.35.0 + go.opentelemetry.io/otel/trace v1.36.0 + go.uber.org/zap v1.27.0 + golang.org/x/mod v0.24.0 golang.org/x/sync v0.14.0 - google.golang.org/api v0.166.0 - google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.1 + google.golang.org/api v0.214.0 + google.golang.org/grpc v1.72.1 + google.golang.org/protobuf v1.36.6 + gotest.tools v2.2.0+incompatible ) require ( - ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 // indirect - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.6 // indirect - github.com/agext/levenshtein v1.2.1 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + ariga.io/atlas v0.15.0 // indirect + cel.dev/expr v0.24.0 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.2.2 // indirect + cloud.google.com/go/longrunning v0.6.3 // indirect + cloud.google.com/go/monitoring v1.21.2 // indirect + github.com/ClickHouse/ch-go v0.65.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect + github.com/aws/smithy-go v1.22.3 // indirect + github.com/bytedance/sonic v1.13.2 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v28.1.1+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v28.1.1+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + 
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/gabriel-vasile/mimetype v1.4.6 // indirect - github.com/gin-contrib/sse v0.1.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/gin-contrib/sse v1.0.0 // indirect + github.com/go-faster/city v1.0.1 // indirect + github.com/go-faster/errors v0.7.1 // indirect + github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/inflect v0.19.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/loads v0.21.2 // indirect - github.com/go-openapi/spec v0.20.8 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.22.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect - github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/go-playground/validator/v10 v10.26.0 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/go-cmp v0.7.0 // indirect + 
github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/launchdarkly/ccache v1.1.0 // indirect + github.com/launchdarkly/eventsource v1.8.0 // indirect + github.com/launchdarkly/go-jsonstream/v3 v3.1.0 // indirect + github.com/launchdarkly/go-sdk-events/v3 v3.5.0 // indirect + github.com/launchdarkly/go-semver v1.0.3 // indirect + github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect 
github.com/oklog/ulid v1.3.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/paulmach/orb v0.11.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/segmentio/asm v1.2.0 // indirect + github.com/sergi/go-diff v1.1.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - go.mongodb.org/mongo-driver v1.12.1 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect + github.com/zclconf/go-cty v1.14.1 // indirect + github.com/zeebo/errs v1.4.0 // indirect + go.mongodb.org/mongo-driver v1.17.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect + 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect + go.opentelemetry.io/proto/otlp v1.6.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/arch v0.16.0 // indirect golang.org/x/crypto v0.38.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect + golang.org/x/net v0.40.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + golang.org/x/time v0.11.0 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.5.2 // indirect ) - -// No longer needed as they are already in the main require block - -require ( - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect - github.com/aws/smithy-go v1.22.2 // indirect -) - -// Using older versions directly in requirements to avoid dependency conflicts diff --git a/packages/shared/go.sum b/packages/shared/go.sum index 40e685e..54db51d 100644 --- a/packages/shared/go.sum +++ b/packages/shared/go.sum @@ -1,38 +1,78 @@ -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO48VHLAZimqiaZYnMKk8= -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= +ariga.io/atlas v0.15.0 h1:9lwSVcO/D3WgaCzstSGqR1hEDtsGibu6JqUofEI/0sY= +ariga.io/atlas v0.15.0/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.6 
h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= -cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/artifactregistry v1.16.0 h1:BZpz0x8HCG7hwTkD+GlUwPQVFGOo9w84t8kxQwwc0DA= +cloud.google.com/go/artifactregistry v1.16.0/go.mod h1:LunXo4u2rFtvJjrGjO0JS+Gs9Eco2xbZU6JVJ4+T8Sk= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= +cloud.google.com/go/longrunning v0.6.3 h1:A2q2vuyXysRcwzqDpMMLSI6mb6o39miS52UEG/Rd2ng= +cloud.google.com/go/longrunning v0.6.3/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= +cloud.google.com/go/storage v1.50.0 
h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= +cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= +connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw= +connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ClickHouse/ch-go v0.65.1 h1:SLuxmLl5Mjj44/XbINsK2HFvzqup0s6rwKLFH347ZhU= +github.com/ClickHouse/ch-go v0.65.1/go.mod h1:bsodgURwmrkvkBe5jw1qnGDgyITsYErfONKAHn05nv4= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/ClickHouse/clickhouse-go/v2 v2.33.1 h1:Z5nO/AnmUywcw0AvhAD0M1C2EaMspnXRK9vEOLxgmI0= +github.com/ClickHouse/clickhouse-go/v2 v2.33.1/go.mod h1:cb1Ss8Sz8PZNdfvEBwkMAdRhoyB6/HiB6o3We5ZIcE4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= 
-github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0/go.mod h1:6fTWu4m3jocfUZLYF5KsZC1TUfRvEjs7lM4crme/irw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0 h1:jJKWl98inONJAr/IZrdFQUWcwUO95DLY1XMD1ZIut+g= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0/go.mod h1:l2fIqmwB+FKSfvn3bAD/0i+AXAxhIZjTK2svT/mgUXs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 h1:GYUJLfvd++4DMuMhCFLgLXvFwofIxh/qOwoGuS/LTew= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= 
+github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= -github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= -github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8= +github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod 
h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74 h1:+1lc5oMFFHlVBclPXQf/POqlvdpBzjLaN2c3ujDCcZw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.74/go.mod h1:EiskBoFr4SpYnFIbw8UM7DP7CacQXDHEmJqLI1xpRFI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= @@ -41,152 +81,154 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0 h1:E+UTVTDH6XTSjqxHWRuY8nB6s+05UllneWxnycplHFk= +github.com/aws/aws-sdk-go-v2/service/ecr v1.44.0/go.mod h1:iQ1skgw1XRK+6Lgkb0I9ODatAP72WoTILh0zXQ5DtbU= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1 
h1:4nm2G6A4pV9rdlWzGMPv4BNtQp22v1hg3yrtkYpeLl8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.1/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= 
+github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3 h1:BRXS0U76Z8wfF+bnkilA2QwpIch6URlm++yPUt9QPmQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.79.3/go.mod h1:bNXKFFyaiVvWuR6O16h/I1724+aXe/tAkA9/QS01t5k= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= +github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/bits-and-blooms/bitset v1.22.0 h1:Tquv9S8+SGaS3EhyA+up3FXzmkhxPGjQQCkcs2uw7w4= +github.com/bits-and-blooms/bitset v1.22.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= +github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= +github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= 
+github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk= +github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= 
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/uniuri v1.2.0 h1:koIcOUdrTIivZgSLhHQvKgqdWZq5d7KdMEWF1Ud6+5g= github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4kxhkY= +github.com/dhui/dktest v0.4.4 h1:+I4s6JRE1yGuqflzwqG+aIaMdgXIorCf5P98JnaAWa8= +github.com/dhui/dktest v0.4.4/go.mod h1:4+22R4lgsdAXrDyaH4Nqx2JEz2hLp49MqQmm9HLCQhM= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k= +github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I= +github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= -github.com/gabriel-vasile/mimetype 
v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/getkin/kin-openapi v0.132.0 h1:3ISeLMsQzcb5v26yeJrBcdTCEQTag36ZjaGk7MIRUwk= +github.com/getkin/kin-openapi v0.132.0/go.mod h1:3OlG51PCYNsPByuiMB0t4fjnNlIDnaEDsjiKUV8nL58= +github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= +github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= +github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= +github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= 
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= -github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= 
-github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.1 
h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= -github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod 
h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= +github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= 
+github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= +github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -195,8 +237,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -206,42 +248,51 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-containerregistry v0.20.5 h1:4RnlYcDs5hoA++CeFjlbZ/U9Yp1EuWr+UhhTyYQjOP0= +github.com/google/go-containerregistry v0.20.5/go.mod h1:Q14vdOOzug02bwnhMkZKD4e30pDaD9W65qzXpyzF49E= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= 
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= -github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= -github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= -github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= -github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= +github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA= +github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty 
v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -249,79 +300,114 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/launchdarkly/ccache v1.1.0 h1:voD1M+ZJXR3MREOKtBwgTF9hYHl1jg+vFKS/+VAkR2k= +github.com/launchdarkly/ccache v1.1.0/go.mod h1:TlxzrlnzvYeXiLHmesMuvoZetu4Z97cV1SsdqqBJi1Q= +github.com/launchdarkly/eventsource v1.8.0 h1:o9TL53lINP9PCrKESlpIZADvN+eHWlSVmAzZDZ+FEA0= +github.com/launchdarkly/eventsource v1.8.0/go.mod h1:IBckHy1VOjJGqSg07EJJLiUnk5DPunX9LKD9vbcgeHo= +github.com/launchdarkly/go-jsonstream/v3 v3.1.0 h1:U/7/LplZO72XefBQ+FzHf6o4FwLHVqBE+4V58Ornu/E= +github.com/launchdarkly/go-jsonstream/v3 v3.1.0/go.mod h1:2Pt4BR5AwWgsuVTCcIpB6Os04JFIKWfoA+7faKkZB5E= +github.com/launchdarkly/go-sdk-common/v3 v3.1.0 h1:KNCP5rfkOt/25oxGLAVgaU1BgrZnzH9Y/3Z6I8bMwDg= +github.com/launchdarkly/go-sdk-common/v3 v3.1.0/go.mod h1:mXFmDGEh4ydK3QilRhrAyKuf9v44VZQWnINyhqbbOd0= +github.com/launchdarkly/go-sdk-events/v3 v3.5.0 h1:Yav8Thm70dZbO8U1foYwZPf3w60n/lNBRaYeeNM/qg4= +github.com/launchdarkly/go-sdk-events/v3 v3.5.0/go.mod h1:oepYWQ2RvvjfL2WxkE1uJJIuRsIMOP4WIVgUpXRPcNI= +github.com/launchdarkly/go-semver v1.0.3 
h1:agIy/RN3SqeQDIfKkl+oFslEdeIs7pgsJBs3CdCcGQM= +github.com/launchdarkly/go-semver v1.0.3/go.mod h1:xFmMwXba5Mb+3h72Z+VeSs9ahCvKo2QFUTHRNHVqR28= +github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1 h1:rTgcYAFraGFj7sBMB2b7JCYCm0b9kph4FaMX02t4osQ= +github.com/launchdarkly/go-server-sdk-evaluation/v3 v3.0.1/go.mod h1:fPS5d+zOsgFnMunj+Ki6jjlZtFvo4h9iNbtNXxzYn58= +github.com/launchdarkly/go-server-sdk/v7 v7.10.0 h1:LK6+nEAf3884WqH0rZvrEXDJFkNPMAYt+wylCoSsaRM= +github.com/launchdarkly/go-server-sdk/v7 v7.10.0/go.mod h1:G2aEvHogBRuak5Xsqj22YKjz0bGd2rlkrQ1917NVo+s= +github.com/launchdarkly/go-test-helpers/v2 v2.2.0 h1:L3kGILP/6ewikhzhdNkHy1b5y4zs50LueWenVF0sBbs= +github.com/launchdarkly/go-test-helpers/v2 v2.2.0/go.mod h1:L7+th5govYp5oKU9iN7To5PgznBuIjBPn+ejqKR0avw= +github.com/launchdarkly/go-test-helpers/v3 v3.0.2 h1:rh0085g1rVJM5qIukdaQ8z1XTWZztbJ49vRZuveqiuU= +github.com/launchdarkly/go-test-helpers/v3 v3.0.2/go.mod h1:u2ZvJlc/DDJTFrshWW50tWMZHLVYXofuSHUfTU/eIwM= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod 
h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure 
v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= +github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors 
v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.1.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spiffe/go-spiffe/v2 
v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -329,92 +415,99 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/twitchyliquid64/golang-asm 
v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= +github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.12.1 
h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= +github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM= +go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.32.0 h1:ht6IqV6njVN4cMHYpN7pX5oDXZqGtl4fqvbGax1QFNU= -go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.32.0/go.mod h1:1126nNcUXEt2PRo3E5pJ4x98Gyu6K+bQIl5KECEJ6Qk= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 
h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/contrib/propagators/b3 v1.7.0 h1:oRAenUhj+GFttfIp3gj7HYVzBhPOHgq/dWPDSmLCXSY= -go.opentelemetry.io/contrib/propagators/b3 v1.7.0/go.mod h1:gXx7AhL4xXCF42gpm9dQvdohoDa2qeyEx4eIIxqK+h4= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.19.0 
h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= -go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/otelzap v0.9.0 h1:f+xpAfhQTjR8beiSMe1bnT/25PkeyWmOcI+SjXWguNw= +go.opentelemetry.io/contrib/bridges/otelzap v0.9.0/go.mod h1:T1Z1jyS5FttgQoF6UcGhnM+gF9wU32B4lHO69nXw4FE= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao= +go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.36.0 
h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 h1:5dTKu4I5Dn4P2hxyW3l3jTaZx9ACgg0ECos1eAVrheY= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/log v0.10.0 h1:1CXmspaRITvFcjA4kyVszuG4HjA61fPDxMb7q3BuyF0= +go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 
h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw= +go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= +go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI= +go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/arch v0.16.0 
h1:foMtLTdyOmIniqWCHjY6+JxuC54XP1fDwx4N0ASyW+U= +golang.org/x/arch v0.16.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA= +golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -424,105 +517,72 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net 
v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= 
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= +google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto 
v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -533,24 +593,23 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
-gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/packages/shared/migration.sql b/packages/shared/migration.sql deleted file mode 100644 index c776e71..0000000 --- a/packages/shared/migration.sql +++ /dev/null @@ -1,718 +0,0 @@ -CREATE SCHEMA IF NOT EXISTS auth; - --- Create RLS policies for user management -DO $$ - BEGIN - BEGIN - IF NOT EXISTS ( - SELECT 1 - FROM pg_roles - WHERE rolname = 'authenticated' - ) THEN - EXECUTE 'CREATE ROLE authenticated;'; - END IF; - END; - END $$; -; - --- Create RLS policies for user management -DO $$ - BEGIN - IF NOT EXISTS ( - SELECT 1 - FROM pg_proc p - JOIN pg_namespace n ON p.pronamespace = n.oid - WHERE p.proname = 'uid' AND n.nspname = 'auth' - ) THEN - EXECUTE 'CREATE FUNCTION auth.uid() RETURNS uuid AS $func$ - BEGIN - RETURN gen_random_uuid(); - END; - $func$ LANGUAGE plpgsql;'; - END IF; - END; -$$; - - --- Create "users" table -CREATE TABLE IF NOT EXISTS "auth"."users" 
-( - "id" uuid NOT NULL DEFAULT gen_random_uuid(), - "email" text NOT NULL, - PRIMARY KEY ("id") -); --- Add new schema named "auth" -CREATE SCHEMA IF NOT EXISTS "auth"; -CREATE SCHEMA IF NOT EXISTS "extensions"; --- Create "tiers" table -CREATE TABLE "public"."tiers" -( - "id" text NOT NULL, - "name" text NOT NULL, - "vcpu" bigint NOT NULL default '2'::bigint, - "ram_mb" bigint NOT NULL DEFAULT '512'::bigint, - "disk_mb" bigint NOT NULL DEFAULT '512'::bigint, - "concurrent_instances" bigint NOT NULL, - PRIMARY KEY ("id"), - constraint tiers_concurrent_sessions_check check ((concurrent_instances > 0)), - constraint tiers_disk_mb_check check ((disk_mb > 0)), - constraint tiers_ram_mb_check check ((ram_mb > 0)), - constraint tiers_vcpu_check check ((vcpu > 0)) -); -ALTER TABLE "public"."tiers" ENABLE ROW LEVEL SECURITY; - - -COMMENT ON COLUMN public.tiers.concurrent_instances - IS 'The number of instances the team can run concurrently'; - --- Create "teams" table -CREATE TABLE "public"."teams" -( - "id" uuid DEFAULT gen_random_uuid(), - "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, - "is_default" boolean NOT NULL, - "is_blocked" boolean NOT NULL DEFAULT FALSE, - "name" text NOT NULL, - "tier" text NOT NULL, - PRIMARY KEY ("id"), - CONSTRAINT "teams_tiers_teams" FOREIGN KEY ("tier") REFERENCES "public"."tiers" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION -); -ALTER TABLE "public"."teams" ENABLE ROW LEVEL SECURITY; - --- Create "envs" table -CREATE TABLE "public"."envs" -( - "id" text NOT NULL, - "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, - "updated_at" timestamptz NOT NULL, - "dockerfile" text NOT NULL, - "public" boolean NOT NULL DEFAULT FALSE, - "build_id" uuid NOT NULL, - "build_count" integer NOT NULL DEFAULT 1, - "spawn_count" bigint NOT NULL DEFAULT '0'::bigint, - "last_spawned_at" timestamptz NULL, - "team_id" uuid NOT NULL, - PRIMARY KEY ("id"), - CONSTRAINT "envs_teams_envs" FOREIGN KEY ("team_id") REFERENCES 
"public"."teams" ("id") ON UPDATE NO ACTION ON DELETE NO ACTION -); -ALTER TABLE "public"."envs" ENABLE ROW LEVEL SECURITY; - -COMMENT ON COLUMN public.envs.last_spawned_at - IS 'Timestamp of the last time the env was spawned'; -COMMENT ON COLUMN public.envs.spawn_count - IS 'Number of times the env was spawned'; - --- Create "env_aliases" table -CREATE TABLE "public"."env_aliases" -( - "alias" text NOT NULL, - "is_name" boolean NOT NULL DEFAULT true, - "env_id" text NULL, - PRIMARY KEY ("alias"), - CONSTRAINT "env_aliases_envs_env_aliases" FOREIGN KEY ("env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE -); -ALTER TABLE "public"."env_aliases" ENABLE ROW LEVEL SECURITY; - --- Create "team_api_keys" table -CREATE TABLE "public"."team_api_keys" -( - "api_key" character varying(44) NOT NULL, - "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, - "team_id" uuid NOT NULL, - PRIMARY KEY ("api_key"), - CONSTRAINT "team_api_keys_teams_team_api_keys" FOREIGN KEY ("team_id") REFERENCES "public"."teams" ("id") ON UPDATE NO ACTION ON DELETE CASCADE -); -ALTER TABLE "public"."team_api_keys" ENABLE ROW LEVEL SECURITY; - --- Create "users" table -CREATE TABLE IF NOT EXISTS "auth"."users" -( - "id" uuid NOT NULL DEFAULT gen_random_uuid(), - "email" character varying(255) NOT NULL, - PRIMARY KEY ("id") -); - --- Create "access_tokens" table -CREATE TABLE "public"."access_tokens" -( - "access_token" text NOT NULL, - "user_id" uuid NOT NULL, - "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY ("access_token"), - CONSTRAINT "access_tokens_users_access_tokens" FOREIGN KEY ("user_id") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE CASCADE -); -ALTER TABLE "public"."access_tokens" ENABLE ROW LEVEL SECURITY; - --- Create "users_teams" table -CREATE TABLE "public"."users_teams" -( - "id" bigint NOT NULL GENERATED BY DEFAULT AS IDENTITY, - "user_id" uuid NOT NULL, - "team_id" uuid NOT NULL, - PRIMARY KEY 
("id"), - CONSTRAINT "users_teams_teams_teams" FOREIGN KEY ("team_id") REFERENCES "public"."teams" ("id") ON UPDATE NO ACTION ON DELETE CASCADE, - CONSTRAINT "users_teams_users_users" FOREIGN KEY ("user_id") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE CASCADE -); -ALTER TABLE "public"."users_teams" ENABLE ROW LEVEL SECURITY; - --- Create RLS policies -DO $$ -BEGIN - BEGIN - CREATE POLICY "Allow selection for users that are in the team" - ON "public"."teams" - AS PERMISSIVE - FOR SELECT - TO authenticated - USING ((auth.uid() IN ( SELECT users_teams.user_id - FROM users_teams - WHERE (users_teams.team_id = teams.id)))); - - CREATE POLICY "Enable select for users in relevant team" - ON "public"."users_teams" - AS PERMISSIVE - FOR SELECT - TO authenticated - USING ((auth.uid() = user_id)); - - CREATE POLICY "Enable select for users based on user_id" - ON public.access_tokens - AS PERMISSIVE - FOR SELECT - TO authenticated - USING ((auth.uid() = user_id)); - - - CREATE POLICY "Allow selection for users that are in the team" - ON "public"."team_api_keys" - AS PERMISSIVE - FOR SELECT - TO authenticated - USING ((auth.uid() IN ( SELECT users_teams.user_id - FROM users_teams - WHERE (users_teams.team_id = team_api_keys.team_id)))); - EXCEPTION WHEN undefined_function - THEN RAISE NOTICE 'Policy were not created, probably because the function auth.uid() does not exist.'; - END; -END $$; - --- Create index "usersteams_team_id_user_id" to table: "users_teams" -CREATE UNIQUE INDEX "usersteams_team_id_user_id" ON "public"."users_teams" ("team_id", "user_id"); --- Add base tier -INSERT INTO public.tiers (id, name, vcpu, ram_mb, disk_mb, concurrent_instances) VALUES ('base_v1', 'Base tier', 2, 512, 512, 20); - --- Create user for triggers -CREATE USER trigger_user; -GRANT trigger_user TO postgres; - -GRANT CREATE, USAGE ON SCHEMA public TO trigger_user; -GRANT USAGE ON SCHEMA extensions TO trigger_user; -GRANT USAGE ON SCHEMA auth TO trigger_user; - -GRANT 
SELECT, INSERT, TRIGGER ON public.teams TO trigger_user; -GRANT INSERT ON public.users_teams TO trigger_user; -GRANT INSERT ON public.team_api_keys TO trigger_user; -GRANT INSERT ON public.access_tokens TO trigger_user; - --- -CREATE OR REPLACE FUNCTION public.generate_default_team_trigger() - RETURNS TRIGGER - LANGUAGE plpgsql - AS $create_default_team$ -DECLARE - team_id uuid; -BEGIN - RAISE NOTICE 'Creating default team for user %', NEW.id; - INSERT INTO public.teams(name, is_default, tier, email) VALUES (NEW.email, true, 'base_v1', NEW.email) RETURNING id INTO team_id; - INSERT INTO public.users_teams(user_id, team_id) VALUES (NEW.id, team_id); - RAISE NOTICE 'Created default team for user % and team %', NEW.id, team_id; - RETURN NEW; -END -$create_default_team$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.generate_default_team_trigger() OWNER TO trigger_user; - -CREATE OR REPLACE TRIGGER create_default_team - AFTER INSERT ON auth.users - FOR EACH ROW EXECUTE FUNCTION generate_default_team_trigger(); - - -CREATE OR REPLACE FUNCTION public.generate_teams_api_keys_trigger() RETURNS TRIGGER - LANGUAGE plpgsql -AS $generate_teams_api_keys$ -DECLARE - key_prefix TEXT := 'e2b_'; - generated_key TEXT; -BEGIN - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - generated_key := encode(extensions.gen_random_bytes(20), 'hex'); - INSERT INTO public.team_api_keys (team_id, api_key) - VALUES (NEW.id, key_prefix || generated_key); - RETURN NEW; -END -$generate_teams_api_keys$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.generate_teams_api_keys_trigger() OWNER TO trigger_user; - -CREATE OR REPLACE TRIGGER team_api_keys_trigger - AFTER INSERT ON public.teams - FOR EACH ROW EXECUTE FUNCTION generate_teams_api_keys_trigger(); - - - -CREATE OR REPLACE FUNCTION public.generate_access_token_trigger() RETURNS TRIGGER - LANGUAGE plpgsql - AS $generate_access_token$ -DECLARE - key_prefix TEXT := 'sk_e2b_'; - 
generated_key TEXT; -BEGIN - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - generated_key := encode(extensions.gen_random_bytes(20), 'hex'); - INSERT INTO public.access_tokens (user_id, access_token) - VALUES (NEW.id, key_prefix || generated_key); - RETURN NEW; -END; -$generate_access_token$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.generate_access_token_trigger() OWNER TO trigger_user; - - -CREATE OR REPLACE TRIGGER create_access_token - AFTER INSERT ON auth.users - FOR EACH ROW EXECUTE FUNCTION generate_access_token_trigger(); - - -CREATE POLICY "Allow to create an access token to new user" - ON public.access_tokens - AS PERMISSIVE - FOR INSERT - TO trigger_user - WITH CHECK (TRUE); - -CREATE POLICY "Allow to create a team to new user" - ON public.teams - AS PERMISSIVE - FOR INSERT - TO trigger_user - WITH CHECK (TRUE); - -CREATE POLICY "Allow to create a user team connection to new user" - ON public.users_teams - AS PERMISSIVE - FOR INSERT - TO trigger_user - WITH CHECK (TRUE); - -CREATE POLICY "Allow to select a team for supabase auth admin" - ON public.teams - AS PERMISSIVE - FOR SELECT - TO trigger_user - USING (TRUE); - -CREATE POLICY "Allow to create a team api key to new user" - ON public.team_api_keys - AS PERMISSIVE - FOR INSERT - TO trigger_user - WITH CHECK (TRUE); --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "vcpu" bigint NOT NULL, ADD COLUMN "ram_mb" bigint NOT NULL, ADD COLUMN "free_disk_size_mb" bigint NOT NULL, ADD COLUMN "total_disk_size_mb" bigint NOT NULL; --- Modify "teams" table -ALTER TABLE "public"."teams" ADD COLUMN "email" character varying(255) NULL; - - -CREATE OR REPLACE FUNCTION public.generate_default_team() RETURNS TRIGGER - LANGUAGE plpgsql -AS $create_default_team$ -DECLARE - team_id uuid; -BEGIN - INSERT INTO public.teams(name, is_default, tier, email) VALUES (NEW.email, true, 'base', NEW.email) RETURNING id INTO team_id; - INSERT INTO 
public.users_teams(user_id, team_id) VALUES (NEW.id, team_id); - RAISE NOTICE 'Created default team for user % and team %', NEW.id, team_id; - RETURN NEW; -END -$create_default_team$ SECURITY DEFINER SET search_path = public; - -UPDATE "public"."teams" SET "email" = "name"; - -ALTER TABLE "public"."teams" ALTER COLUMN "email" SET NOT NULL;-- Modify "teams" table -ALTER TABLE "public"."teams" ADD COLUMN "is_banned" boolean NOT NULL DEFAULT false, ADD COLUMN "blocked_reason" TEXT NULL; - --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "kernel_version" character varying NULL; -UPDATE "public"."envs" SET "kernel_version" = 'vmlinux-5.10.186-old'; -ALTER TABLE "public"."envs" ALTER COLUMN "kernel_version" SET NOT NULL; -ALTER TABLE "public"."envs" ALTER COLUMN "kernel_version" SET DEFAULT 'vmlinux-5.10.186'; --- Modify "tiers" table -ALTER TABLE "public"."tiers" ADD COLUMN "max_length_hours" bigint NULL; -UPDATE "public"."tiers" SET "max_length_hours" = 1; -ALTER TABLE "public"."tiers" ALTER COLUMN "max_length_hours" SET NOT NULL; --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "firecracker_version" character varying NOT NULL DEFAULT 'v1.5.0_8a43b32e';-- Modify "envs" table -ALTER TABLE "public"."envs" ALTER COLUMN "firecracker_version" SET DEFAULT 'v1.7.0-dev_8bb88311'; --- Modify "tiers" table -ALTER TABLE "public"."tiers" DROP CONSTRAINT "tiers_ram_mb_check", DROP CONSTRAINT "tiers_vcpu_check", DROP COLUMN "vcpu", DROP COLUMN "ram_mb"; --- Modify "env_aliases" table -ALTER TABLE "public"."env_aliases" RENAME COLUMN "is_name" TO "is_renamable"; -ALTER TABLE "public"."env_aliases" ALTER COLUMN "env_id" SET NOT NULL; - --- Create "env_builds" table -CREATE TABLE "public"."env_builds" ("id" uuid NOT NULL DEFAULT gen_random_uuid(), "created_at" timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, "updated_at" timestamptz NOT NULL, "finished_at" timestamptz NULL, "status" text NOT NULL DEFAULT 'waiting', "dockerfile" text NULL, "start_cmd" text 
NULL, "vcpu" bigint NOT NULL, "ram_mb" bigint NOT NULL, "free_disk_size_mb" bigint NOT NULL, "total_disk_size_mb" bigint NULL, "kernel_version" text NOT NULL DEFAULT 'vmlinux-5.10.186', "firecracker_version" text NOT NULL DEFAULT 'v1.7.0-dev_8bb88311', "env_id" text NULL, PRIMARY KEY ("id"), CONSTRAINT "env_builds_envs_builds" FOREIGN KEY ("env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE); -ALTER TABLE "public"."env_builds" ENABLE ROW LEVEL SECURITY; - --- Populate "env_builds" table -INSERT INTO "public"."env_builds"(updated_at, finished_at, status, dockerfile, start_cmd, vcpu, ram_mb, free_disk_size_mb, total_disk_size_mb, kernel_version, firecracker_version, env_id) -SELECT CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, 'success', dockerfile, NULL, vcpu, ram_mb, free_disk_size_mb, total_disk_size_mb, kernel_version, firecracker_version, id -FROM "public"."envs"; - --- Modify "envs" table -ALTER TABLE "public"."envs" DROP COLUMN "dockerfile", DROP COLUMN "build_id", DROP COLUMN "vcpu", DROP COLUMN "ram_mb", DROP COLUMN "free_disk_size_mb", DROP COLUMN "total_disk_size_mb", DROP COLUMN "kernel_version", DROP COLUMN "firecracker_version"; -DROP TRIGGER create_default_team ON auth.users; -DROP FUNCTION generate_default_team_trigger(); -DROP TRIGGER team_api_keys_trigger ON public.teams; -DROP FUNCTION generate_teams_api_keys_trigger(); -DROP TRIGGER create_access_token ON auth.users; -DROP FUNCTION generate_access_token_trigger(); - -CREATE OR REPLACE FUNCTION public.extra_for_post_user_signup(user_id uuid, team_id uuid) - RETURNS void - LANGUAGE plpgsql -AS $extra_for_post_user_signup$ -DECLARE -BEGIN -END -$extra_for_post_user_signup$ SECURITY DEFINER SET search_path = public; - -CREATE OR REPLACE FUNCTION public.generate_team_api_key() - RETURNS TEXT - LANGUAGE plpgsql -AS $generate_team_api_key$ -DECLARE - team_api_key_prefix TEXT := 'e2b_'; - generated_key TEXT; -BEGIN - -- Generate a random 20 byte string and encode it as hex, so it's 
40 characters - generated_key := encode(extensions.gen_random_bytes(20), 'hex'); - RETURN team_api_key_prefix || generated_key; -END -$generate_team_api_key$ SECURITY DEFINER SET search_path = public; - -ALTER TABLE public.team_api_keys ALTER COLUMN api_key SET DEFAULT public.generate_team_api_key(); - -CREATE OR REPLACE FUNCTION public.generate_access_token() - RETURNS TEXT - LANGUAGE plpgsql -AS $extra_for_post_user_signup$ -DECLARE - access_token_prefix TEXT := 'sk_e2b_'; - generated_token TEXT; -BEGIN - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - generated_token := encode(extensions.gen_random_bytes(20), 'hex'); - RETURN access_token_prefix || generated_token; -END -$extra_for_post_user_signup$ SECURITY DEFINER SET search_path = public; - -ALTER TABLE public.access_tokens ALTER COLUMN access_token SET DEFAULT public.generate_access_token(); - -ALTER FUNCTION public.extra_for_post_user_signup(uuid, uuid) OWNER TO trigger_user; - -CREATE OR REPLACE FUNCTION public.post_user_signup() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $post_user_signup$ -DECLARE - team_id uuid; -BEGIN - RAISE NOTICE 'Creating default team for user %', NEW.id; - INSERT INTO public.teams(name, is_default, tier, email) VALUES (NEW.email, true, 'base_v1', NEW.email) RETURNING id INTO team_id; - INSERT INTO public.users_teams(user_id, team_id) VALUES (NEW.id, team_id); - RAISE NOTICE 'Created default team for user % and team %', NEW.id, team_id; - - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - INSERT INTO public.team_api_keys (team_id) - VALUES (team_id); - - INSERT INTO public.access_tokens (user_id) - VALUES (NEW.id); - - PERFORM public.extra_for_post_user_signup(NEW.id, team_id); - - RETURN NEW; -END -$post_user_signup$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.post_user_signup() OWNER TO trigger_user; - - -CREATE OR REPLACE TRIGGER post_user_signup - AFTER INSERT ON auth.users - FOR EACH ROW 
EXECUTE FUNCTION post_user_signup(); - - -CREATE OR REPLACE FUNCTION is_member_of_team(_user_id uuid, _team_id uuid) RETURNS bool AS $$ -SELECT EXISTS ( - SELECT 1 - FROM public.users_teams ut - WHERE ut.user_id = _user_id - AND ut.team_id = _team_id -); -$$ LANGUAGE sql SECURITY DEFINER; - --- Create RLS policies for user management -DO $$ - BEGIN - BEGIN - CREATE POLICY "Allow users to delete a team api key" - ON "public"."team_api_keys" - AS PERMISSIVE - FOR DELETE - TO authenticated - USING ((SELECT auth.uid()) IN ( SELECT users_teams.user_id - FROM users_teams - WHERE (users_teams.team_id = team_api_keys.team_id))); - - CREATE POLICY "Allow users to create a new team user entry" - ON "public"."users_teams" - AS PERMISSIVE - FOR INSERT - TO authenticated - WITH CHECK (team_id IN ( SELECT users_teams.team_id - FROM users_teams - WHERE (users_teams.user_id = (SELECT auth.uid())))); - - CREATE POLICY "Allow users to delete a team user entry" - ON public.users_teams - AS PERMISSIVE - FOR DELETE - TO authenticated - USING (team_id IN ( SELECT users_teams.team_id - FROM users_teams - WHERE (users_teams.user_id = auth.uid()))); - - CREATE POLICY "Allow update for users that are in the team" - ON "public"."teams" - AS PERMISSIVE - FOR UPDATE - TO authenticated - USING ((auth.uid() IN ( SELECT users_teams.user_id - FROM users_teams - WHERE (users_teams.team_id = teams.id)))); - - ALTER POLICY "Enable select for users in relevant team" - on "public"."users_teams" - to authenticated - using (is_member_of_team(auth.uid(), team_id) - ); - - END; - END $$; -;-- Modify "env_builds" table -ALTER TABLE "public"."env_builds" ADD COLUMN "envd_version" text NULL; - --- Populate "envd_version" column -UPDATE "public"."env_builds" SET "envd_version" = 'v0.0.1'; --- Modify "access_tokens" table -ALTER TABLE "public"."users_teams" ADD COLUMN "is_default" boolean NOT NULL DEFAULT false; -UPDATE "public"."users_teams" ut SET "is_default" = t."is_default" FROM "public"."teams" t WHERE 
ut."team_id" = t."id"; - -CREATE OR REPLACE FUNCTION public.post_user_signup() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $post_user_signup$ -DECLARE - team_id uuid; -BEGIN - RAISE NOTICE 'Creating default team for user %', NEW.id; - INSERT INTO public.teams(name, is_default, tier, email) VALUES (NEW.email, true, 'base_v1', NEW.email) RETURNING id INTO team_id; - INSERT INTO public.users_teams(user_id, team_id, is_default) VALUES (NEW.id, team_id, true); - RAISE NOTICE 'Created default team for user % and team %', NEW.id, team_id; - - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - INSERT INTO public.team_api_keys (team_id) - VALUES (team_id); - - INSERT INTO public.access_tokens (user_id) - VALUES (NEW.id); - - PERFORM public.extra_for_post_user_signup(NEW.id, team_id); - - RETURN NEW; -END -$post_user_signup$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.post_user_signup() OWNER TO trigger_user;DROP POLICY "Allow update for users that are in the team" ON "public"."teams"; -DROP POLICY "Allow users to delete a team user entry" ON "public"."users_teams"; -DROP POLICY "Allow users to create a new team user entry" ON "public"."users_teams"; --- Modify "team_api_keys" table -ALTER TABLE "public"."team_api_keys" - ADD COLUMN "updated_at" timestamptz NULL, - ADD COLUMN "name" text NOT NULL DEFAULT 'Unnamed API Key', - ADD COLUMN "last_used" timestamptz NULL, - ADD COLUMN "created_by" uuid NULL, - ADD CONSTRAINT "team_api_keys_users_created_api_keys" FOREIGN KEY ("created_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL; --- Modify "team_api_keys" table -ALTER TABLE "public"."team_api_keys" - DROP CONSTRAINT "team_api_keys_pkey", - ADD COLUMN "id" uuid NOT NULL DEFAULT gen_random_uuid(), - ADD PRIMARY KEY ("id"); --- Create index "team_api_keys_api_key_key" to table: "team_api_keys" -CREATE UNIQUE INDEX "team_api_keys_api_key_key" ON "public"."team_api_keys" ("api_key");-- Modify "envs" 
table -ALTER TABLE "public"."envs" - ADD COLUMN "created_by" uuid NULL, - ADD CONSTRAINT "envs_users_created_envs" FOREIGN KEY ("created_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL; --- Add column to "users_teams" table -ALTER TABLE "public"."users_teams" ADD COLUMN "added_by" uuid NULL; -ALTER TABLE "public"."users_teams" ADD CONSTRAINT "users_teams_added_by_user" FOREIGN KEY ("added_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL;-- Create "snapshots" table -CREATE TABLE "public"."snapshots" -( - created_at timestamp with time zone null, - env_id text not null, - sandbox_id text not null, - id uuid not null default gen_random_uuid (), - metadata jsonb null, - base_env_id text not null, - constraint snapshots_pkey primary key (id) -); -ALTER TABLE "public"."snapshots" ENABLE ROW LEVEL SECURITY; --- Alter "teams" table -ALTER TABLE "public"."teams" DROP COLUMN "is_default"; - -CREATE OR REPLACE FUNCTION public.post_user_signup() - RETURNS TRIGGER - LANGUAGE plpgsql -AS $post_user_signup$ -DECLARE - team_id uuid; -BEGIN - RAISE NOTICE 'Creating default team for user %', NEW.id; - INSERT INTO public.teams(name, tier, email) VALUES (NEW.email, 'base_v1', NEW.email) RETURNING id INTO team_id; - INSERT INTO public.users_teams(user_id, team_id, is_default) VALUES (NEW.id, team_id, true); - RAISE NOTICE 'Created default team for user % and team %', NEW.id, team_id; - - -- Generate a random 20 byte string and encode it as hex, so it's 40 characters - INSERT INTO public.team_api_keys (team_id) - VALUES (team_id); - - INSERT INTO public.access_tokens (user_id) - VALUES (NEW.id); - - PERFORM public.extra_for_post_user_signup(NEW.id, team_id); - - RETURN NEW; -END -$post_user_signup$ SECURITY DEFINER SET search_path = public; -/* -This migration adds team slugs and profile pictures to support user-friendly URLs and team branding. - -It performs the following steps: - -1. 
Adds two new columns to the teams table: - - slug: A URL-friendly version of the team name (e.g. "acme-inc") - - profile_picture_url: URL to the team's profile picture - -2. Creates a slug generation function that: - - Takes a team name and converts it to a URL-friendly format - - Removes special characters, accents, and spaces - - Handles email addresses by only using the part before @ - - Converts to lowercase and replaces spaces with hyphens - -3. Installs the unaccent PostgreSQL extension for proper accent handling - -4. Generates initial slugs for all existing teams: - - Uses the team name as base for the slug - - If multiple teams would have the same slug, appends part of the team ID - to ensure uniqueness - -5. Sets up automatic slug generation for new teams: - - Creates a trigger that runs before team insertion - - Generates a unique slug using random suffixes if needed - - Only generates a slug if one isn't explicitly provided - -6. Enforces slug uniqueness with a database constraint -*/ - -ALTER TABLE teams -ADD COLUMN slug TEXT, -ADD COLUMN profile_picture_url TEXT; - -CREATE OR REPLACE FUNCTION generate_team_slug(name TEXT) -RETURNS TEXT AS $$ -DECLARE - base_name TEXT; -BEGIN - base_name := SPLIT_PART(name, '@', 1); - - RETURN LOWER( - REGEXP_REPLACE( - REGEXP_REPLACE( - UNACCENT(TRIM(base_name)), - '[^a-zA-Z0-9\s-]', - '', - 'g' - ), - '\s+', - '-', - 'g' - ) - ); -END; -$$ LANGUAGE plpgsql; - -CREATE EXTENSION IF NOT EXISTS unaccent; - -WITH numbered_teams AS ( - SELECT - id, - name, - generate_team_slug(name) as base_slug, - ROW_NUMBER() OVER (PARTITION BY generate_team_slug(name) ORDER BY created_at) as slug_count - FROM teams - WHERE slug IS NULL -) -UPDATE teams -SET slug = - CASE - WHEN t.slug_count = 1 THEN t.base_slug - ELSE t.base_slug || '-' || SUBSTRING(teams.id::text, 1, 4) - END -FROM numbered_teams t -WHERE teams.id = t.id; - -CREATE OR REPLACE FUNCTION generate_team_slug_trigger() -RETURNS TRIGGER AS $$ -DECLARE - base_slug TEXT; - 
test_slug TEXT; - suffix TEXT; -BEGIN - IF NEW.slug IS NULL THEN - base_slug := generate_team_slug(NEW.name); - test_slug := base_slug; - - WHILE EXISTS (SELECT 1 FROM teams WHERE slug = test_slug) LOOP - suffix := SUBSTRING(gen_random_uuid()::text, 33, 4); - test_slug := base_slug || '-' || suffix; - END LOOP; - - NEW.slug := test_slug; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER team_slug_trigger -BEFORE INSERT ON teams -FOR EACH ROW -EXECUTE FUNCTION generate_team_slug_trigger(); - -ALTER TABLE teams -ADD CONSTRAINT teams_slug_unique UNIQUE (slug); - -ALTER TABLE teams -ALTER COLUMN slug SET NOT NULL;ALTER TABLE "public"."snapshots" - ADD CONSTRAINT "snapshots_envs_env_id" FOREIGN KEY ("env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE, - ADD CONSTRAINT "snapshots_envs_base_env_id" FOREIGN KEY ("base_env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE; \ No newline at end of file diff --git a/packages/shared/migrations/20231222181015.sql b/packages/shared/migrations/20231222181015.sql deleted file mode 100644 index dc629c4..0000000 --- a/packages/shared/migrations/20231222181015.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "vcpu" bigint NOT NULL, ADD COLUMN "ram_mb" bigint NOT NULL, ADD COLUMN "free_disk_size_mb" bigint NOT NULL, ADD COLUMN "total_disk_size_mb" bigint NOT NULL; diff --git a/packages/shared/migrations/20240106121919.sql b/packages/shared/migrations/20240106121919.sql deleted file mode 100644 index 2d0229c..0000000 --- a/packages/shared/migrations/20240106121919.sql +++ /dev/null @@ -1,3 +0,0 @@ --- Modify "teams" table -ALTER TABLE "public"."teams" ADD COLUMN "is_banned" boolean NOT NULL DEFAULT false, ADD COLUMN "blocked_reason" TEXT NULL; - diff --git a/packages/shared/migrations/20240202120312.sql b/packages/shared/migrations/20240202120312.sql deleted file mode 100644 index 60758c8..0000000 --- 
a/packages/shared/migrations/20240202120312.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "kernel_version" character varying NULL; -UPDATE "public"."envs" SET "kernel_version" = 'vmlinux-5.10.186-old'; -ALTER TABLE "public"."envs" ALTER COLUMN "kernel_version" SET NOT NULL; -ALTER TABLE "public"."envs" ALTER COLUMN "kernel_version" SET DEFAULT 'vmlinux-5.10.186'; diff --git a/packages/shared/migrations/20240219190940.sql b/packages/shared/migrations/20240219190940.sql deleted file mode 100644 index 9880b9f..0000000 --- a/packages/shared/migrations/20240219190940.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Modify "tiers" table -ALTER TABLE "public"."tiers" ADD COLUMN "max_length_hours" bigint NULL; -UPDATE "public"."tiers" SET "max_length_hours" = 1; -ALTER TABLE "public"."tiers" ALTER COLUMN "max_length_hours" SET NOT NULL; diff --git a/packages/shared/migrations/20240221023613.sql b/packages/shared/migrations/20240221023613.sql deleted file mode 100644 index 3c05a90..0000000 --- a/packages/shared/migrations/20240221023613.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Modify "envs" table -ALTER TABLE "public"."envs" ADD COLUMN "firecracker_version" character varying NOT NULL DEFAULT 'v1.5.0_8a43b32e'; \ No newline at end of file diff --git a/packages/shared/migrations/20240221215408.sql b/packages/shared/migrations/20240221215408.sql deleted file mode 100644 index d94f938..0000000 --- a/packages/shared/migrations/20240221215408.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Modify "envs" table -ALTER TABLE "public"."envs" ALTER COLUMN "firecracker_version" SET DEFAULT 'v1.7.0-dev_8bb88311'; diff --git a/packages/shared/migrations/20240305221944.sql b/packages/shared/migrations/20240305221944.sql deleted file mode 100644 index 84be66c..0000000 --- a/packages/shared/migrations/20240305221944.sql +++ /dev/null @@ -1,2 +0,0 @@ --- Modify "tiers" table -ALTER TABLE "public"."tiers" DROP CONSTRAINT "tiers_ram_mb_check", DROP CONSTRAINT 
"tiers_vcpu_check", DROP COLUMN "vcpu", DROP COLUMN "ram_mb"; diff --git a/packages/shared/migrations/20240625095352.sql b/packages/shared/migrations/20240625095352.sql deleted file mode 100644 index a47b300..0000000 --- a/packages/shared/migrations/20240625095352.sql +++ /dev/null @@ -1,5 +0,0 @@ --- Modify "env_builds" table -ALTER TABLE "public"."env_builds" ADD COLUMN "envd_version" text NULL; - --- Populate "envd_version" column -UPDATE "public"."env_builds" SET "envd_version" = 'v0.0.1'; diff --git a/packages/shared/migrations/20240909142106.sql b/packages/shared/migrations/20240909142106.sql deleted file mode 100644 index 98187bf..0000000 --- a/packages/shared/migrations/20240909142106.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP POLICY "Allow update for users that are in the team" ON "public"."teams"; -DROP POLICY "Allow users to delete a team user entry" ON "public"."users_teams"; -DROP POLICY "Allow users to create a new team user entry" ON "public"."users_teams"; diff --git a/packages/shared/migrations/20241120222814.sql b/packages/shared/migrations/20241120222814.sql deleted file mode 100644 index 6d8039b..0000000 --- a/packages/shared/migrations/20241120222814.sql +++ /dev/null @@ -1,7 +0,0 @@ --- Modify "team_api_keys" table -ALTER TABLE "public"."team_api_keys" - ADD COLUMN "updated_at" timestamptz NULL, - ADD COLUMN "name" text NOT NULL DEFAULT 'Unnamed API Key', - ADD COLUMN "last_used" timestamptz NULL, - ADD COLUMN "created_by" uuid NULL, - ADD CONSTRAINT "team_api_keys_users_created_api_keys" FOREIGN KEY ("created_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL; diff --git a/packages/shared/migrations/20241121225404.sql b/packages/shared/migrations/20241121225404.sql deleted file mode 100644 index 0d498fe..0000000 --- a/packages/shared/migrations/20241121225404.sql +++ /dev/null @@ -1,7 +0,0 @@ --- Modify "team_api_keys" table -ALTER TABLE "public"."team_api_keys" - DROP CONSTRAINT "team_api_keys_pkey", - ADD COLUMN "id" 
uuid NOT NULL DEFAULT gen_random_uuid(), - ADD PRIMARY KEY ("id"); --- Create index "team_api_keys_api_key_key" to table: "team_api_keys" -CREATE UNIQUE INDEX "team_api_keys_api_key_key" ON "public"."team_api_keys" ("api_key"); \ No newline at end of file diff --git a/packages/shared/migrations/20241127174604.sql b/packages/shared/migrations/20241127174604.sql deleted file mode 100644 index 82c94c1..0000000 --- a/packages/shared/migrations/20241127174604.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Modify "envs" table -ALTER TABLE "public"."envs" - ADD COLUMN "created_by" uuid NULL, - ADD CONSTRAINT "envs_users_created_envs" FOREIGN KEY ("created_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL; diff --git a/packages/shared/migrations/20241206124325.sql b/packages/shared/migrations/20241206124325.sql deleted file mode 100644 index 38504fd..0000000 --- a/packages/shared/migrations/20241206124325.sql +++ /dev/null @@ -1,3 +0,0 @@ --- Add column to "users_teams" table -ALTER TABLE "public"."users_teams" ADD COLUMN "added_by" uuid NULL; -ALTER TABLE "public"."users_teams" ADD CONSTRAINT "users_teams_added_by_user" FOREIGN KEY ("added_by") REFERENCES "auth"."users" ("id") ON UPDATE NO ACTION ON DELETE SET NULL; \ No newline at end of file diff --git a/packages/shared/migrations/20250205180205.sql b/packages/shared/migrations/20250205180205.sql deleted file mode 100644 index 462b4f5..0000000 --- a/packages/shared/migrations/20250205180205.sql +++ /dev/null @@ -1,109 +0,0 @@ -/* -This migration adds team slugs and profile pictures to support user-friendly URLs and team branding. - -It performs the following steps: - -1. Adds two new columns to the teams table: - - slug: A URL-friendly version of the team name (e.g. "acme-inc") - - profile_picture_url: URL to the team's profile picture - -2. 
Creates a slug generation function that: - - Takes a team name and converts it to a URL-friendly format - - Removes special characters, accents, and spaces - - Handles email addresses by only using the part before @ - - Converts to lowercase and replaces spaces with hyphens - -3. Installs the unaccent PostgreSQL extension for proper accent handling - -4. Generates initial slugs for all existing teams: - - Uses the team name as base for the slug - - If multiple teams would have the same slug, appends part of the team ID - to ensure uniqueness - -5. Sets up automatic slug generation for new teams: - - Creates a trigger that runs before team insertion - - Generates a unique slug using random suffixes if needed - - Only generates a slug if one isn't explicitly provided - -6. Enforces slug uniqueness with a database constraint -*/ - -ALTER TABLE teams -ADD COLUMN slug TEXT, -ADD COLUMN profile_picture_url TEXT; - -CREATE OR REPLACE FUNCTION generate_team_slug(name TEXT) -RETURNS TEXT AS $$ -DECLARE - base_name TEXT; -BEGIN - base_name := SPLIT_PART(name, '@', 1); - - RETURN LOWER( - REGEXP_REPLACE( - REGEXP_REPLACE( - UNACCENT(TRIM(base_name)), - '[^a-zA-Z0-9\s-]', - '', - 'g' - ), - '\s+', - '-', - 'g' - ) - ); -END; -$$ LANGUAGE plpgsql; - -CREATE EXTENSION IF NOT EXISTS unaccent; - -WITH numbered_teams AS ( - SELECT - id, - name, - generate_team_slug(name) as base_slug, - ROW_NUMBER() OVER (PARTITION BY generate_team_slug(name) ORDER BY created_at) as slug_count - FROM teams - WHERE slug IS NULL -) -UPDATE teams -SET slug = - CASE - WHEN t.slug_count = 1 THEN t.base_slug - ELSE t.base_slug || '-' || SUBSTRING(teams.id::text, 1, 4) - END -FROM numbered_teams t -WHERE teams.id = t.id; - -CREATE OR REPLACE FUNCTION generate_team_slug_trigger() -RETURNS TRIGGER AS $$ -DECLARE - base_slug TEXT; - test_slug TEXT; - suffix TEXT; -BEGIN - IF NEW.slug IS NULL THEN - base_slug := generate_team_slug(NEW.name); - test_slug := base_slug; - - WHILE EXISTS (SELECT 1 FROM teams 
WHERE slug = test_slug) LOOP - suffix := SUBSTRING(gen_random_uuid()::text, 33, 4); - test_slug := base_slug || '-' || suffix; - END LOOP; - - NEW.slug := test_slug; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; - -CREATE TRIGGER team_slug_trigger -BEFORE INSERT ON teams -FOR EACH ROW -EXECUTE FUNCTION generate_team_slug_trigger(); - -ALTER TABLE teams -ADD CONSTRAINT teams_slug_unique UNIQUE (slug); - -ALTER TABLE teams -ALTER COLUMN slug SET NOT NULL; \ No newline at end of file diff --git a/packages/shared/migrations/20250206105106.sql b/packages/shared/migrations/20250206105106.sql deleted file mode 100644 index ad0dadf..0000000 --- a/packages/shared/migrations/20250206105106.sql +++ /dev/null @@ -1,3 +0,0 @@ -ALTER TABLE "public"."snapshots" - ADD CONSTRAINT "snapshots_envs_env_id" FOREIGN KEY ("env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE, - ADD CONSTRAINT "snapshots_envs_base_env_id" FOREIGN KEY ("base_env_id") REFERENCES "public"."envs" ("id") ON UPDATE NO ACTION ON DELETE CASCADE; \ No newline at end of file diff --git a/packages/shared/pkg/artifacts-registry/registry.go b/packages/shared/pkg/artifacts-registry/registry.go new file mode 100644 index 0000000..e661cfc --- /dev/null +++ b/packages/shared/pkg/artifacts-registry/registry.go @@ -0,0 +1,50 @@ +package artifacts_registry + +import ( + "context" + "errors" + "fmt" + "time" + + containerregistry "github.com/google/go-containerregistry/pkg/v1" + + "github.com/e2b-dev/infra/packages/shared/pkg/env" +) + +type RegistryProvider string + +const ( + GCPStorageProvider RegistryProvider = "GCP_ARTIFACTS" + AWSStorageProvider RegistryProvider = "AWS_ECR" + LocalStorageProvider RegistryProvider = "Local" + + DefaultRegistryProvider RegistryProvider = GCPStorageProvider + + storageProviderEnv = "ARTIFACTS_REGISTRY_PROVIDER" +) + +var ErrImageNotExists = errors.New("image does not exist") + +type ArtifactsRegistry interface { + GetTag(ctx context.Context, templateId 
string, buildId string) (string, error) + GetImage(ctx context.Context, templateId string, buildId string, platform containerregistry.Platform) (containerregistry.Image, error) + Delete(ctx context.Context, templateId string, buildId string) error +} + +func GetArtifactsRegistryProvider() (ArtifactsRegistry, error) { + provider := RegistryProvider(env.GetEnv(storageProviderEnv, string(DefaultRegistryProvider))) + + setupCtx, setupCtxCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer setupCtxCancel() + + switch provider { + case AWSStorageProvider: + return NewAWSArtifactsRegistry(setupCtx) + case GCPStorageProvider: + return NewGCPArtifactsRegistry(setupCtx) + case LocalStorageProvider: + return NewLocalArtifactsRegistry() + } + + return nil, fmt.Errorf("unknown artifacts registry provider: %s", provider) +} diff --git a/packages/shared/pkg/artifacts-registry/registry_aws.go b/packages/shared/pkg/artifacts-registry/registry_aws.go new file mode 100644 index 0000000..ef89c26 --- /dev/null +++ b/packages/shared/pkg/artifacts-registry/registry_aws.go @@ -0,0 +1,137 @@ +package artifacts_registry + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ecr" + "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +type AWSArtifactsRegistry struct { + repositoryName string + client *ecr.Client +} + +var ( + AwsRepositoryNameEnvVar = "AWS_DOCKER_REPOSITORY_NAME" + AwsRepositoryName = os.Getenv(AwsRepositoryNameEnvVar) +) + +func NewAWSArtifactsRegistry(ctx context.Context) (*AWSArtifactsRegistry, error) { + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return nil, err + } + + if AwsRepositoryName == 
"" { + return nil, fmt.Errorf("%s environment variable is not set", AwsRepositoryNameEnvVar) + } + + client := ecr.NewFromConfig(cfg) + + return &AWSArtifactsRegistry{ + repositoryName: AwsRepositoryName, + client: client, + }, nil +} + +func (g *AWSArtifactsRegistry) Delete(ctx context.Context, templateId string, buildId string) error { + imageIds := []types.ImageIdentifier{ + {ImageTag: &buildId}, + } + + // for AWS implementation we are using only build id as image tag + res, err := g.client.BatchDeleteImage(ctx, &ecr.BatchDeleteImageInput{RepositoryName: &g.repositoryName, ImageIds: imageIds}) + if err != nil { + return fmt.Errorf("failed to delete image from aws ecr: %w", err) + } + + if len(res.Failures) > 0 { + if res.Failures[0].FailureCode == types.ImageFailureCodeImageNotFound { + return ErrImageNotExists + } + + return errors.New("failed to delete image from aws ecr") + } + + return nil +} + +func (g *AWSArtifactsRegistry) GetTag(ctx context.Context, templateId string, buildId string) (string, error) { + repositoryNameWithTemplate := fmt.Sprintf("%s/%s", g.repositoryName, templateId) + res, err := g.client.DescribeRepositories(ctx, &ecr.DescribeRepositoriesInput{RepositoryNames: []string{repositoryNameWithTemplate}}) + if err != nil { + return "", fmt.Errorf("failed to describe aws ecr repository: %w", err) + } + + if len(res.Repositories) == 0 { + return "", fmt.Errorf("repository %s not found", g.repositoryName) + } + + return fmt.Sprintf("%s:%s", *res.Repositories[0].RepositoryUri, buildId), nil +} + +func (g *AWSArtifactsRegistry) GetImage(ctx context.Context, templateId string, buildId string, platform containerregistry.Platform) (containerregistry.Image, error) { + imageUrl, err := g.GetTag(ctx, templateId, buildId) + if err != nil { + return nil, fmt.Errorf("failed to get image URL: %w", err) + } + + ref, err := name.ParseReference(imageUrl) + if err != nil { + return nil, fmt.Errorf("invalid image reference: %w", err) + } + + auth, err := 
g.getAuthToken(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get auth: %w", err) + } + + img, err := remote.Image(ref, remote.WithAuth(auth), remote.WithPlatform(platform)) + if err != nil { + return nil, fmt.Errorf("error pulling image: %w", err) + } + + return img, nil +} + +func (g *AWSArtifactsRegistry) getAuthToken(ctx context.Context) (*authn.Basic, error) { + res, err := g.client.GetAuthorizationToken(ctx, &ecr.GetAuthorizationTokenInput{}) + if err != nil { + return nil, fmt.Errorf("failed to get aws ecr auth token: %w", err) + } + + if len(res.AuthorizationData) == 0 { + return nil, fmt.Errorf("no aws ecr auth token found") + } + + authData := res.AuthorizationData[0] + decodedToken, err := base64.StdEncoding.DecodeString(*authData.AuthorizationToken) + if err != nil { + return nil, fmt.Errorf("failed to decode aws ecr auth token: %w", err) + } + + // split into username and password + parts := strings.SplitN(string(decodedToken), ":", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid aws ecr auth token") + } + + username := parts[0] + password := parts[1] + + return &authn.Basic{ + Username: username, + Password: password, + }, nil +} diff --git a/packages/shared/pkg/artifacts-registry/registry_gcp.go b/packages/shared/pkg/artifacts-registry/registry_gcp.go new file mode 100644 index 0000000..75ac4b7 --- /dev/null +++ b/packages/shared/pkg/artifacts-registry/registry_gcp.go @@ -0,0 +1,114 @@ +package artifacts_registry + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + artifactregistry "cloud.google.com/go/artifactregistry/apiv1" + "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + 
"github.com/e2b-dev/infra/packages/shared/pkg/consts" +) + +type GCPArtifactsRegistry struct { + registry *artifactregistry.Client +} + +var gcpAuthConfig = authn.Basic{ + Username: "_json_key_base64", + Password: consts.GoogleServiceAccountSecret, +} + +func NewGCPArtifactsRegistry(ctx context.Context) (*GCPArtifactsRegistry, error) { + registry, err := artifactregistry.NewClient(ctx) + if err != nil { + return nil, fmt.Errorf("error creating artifact registry client: %v", err) + } + + return &GCPArtifactsRegistry{registry: registry}, nil +} + +func (g *GCPArtifactsRegistry) Delete(ctx context.Context, templateId string, buildId string) error { + tagPath := g.getDockerImageTagPath(templateId, buildId) + err := g.registry.DeleteTag(ctx, &artifactregistrypb.DeleteTagRequest{Name: tagPath}) + if err != nil { + if status.Code(err) == codes.NotFound { + return ErrImageNotExists + } + + return fmt.Errorf("error deleting tag %s: %v", tagPath, err) + } + + return nil +} + +func (g *GCPArtifactsRegistry) GetTag(ctx context.Context, templateId string, buildId string) (string, error) { + return fmt.Sprintf("%s-docker.pkg.dev/%s/%s/%s:%s", consts.GCPRegion, consts.GCPProject, consts.DockerRegistry, templateId, buildId), nil +} + +func (g *GCPArtifactsRegistry) GetImage(ctx context.Context, templateId string, buildId string, platform containerregistry.Platform) (containerregistry.Image, error) { + imageUrl, err := g.GetTag(ctx, templateId, buildId) + if err != nil { + return nil, fmt.Errorf("failed to get image URL: %w", err) + } + + ref, err := name.ParseReference(imageUrl) + if err != nil { + return nil, fmt.Errorf("invalid image reference: %w", err) + } + + auth, err := g.getAuthToken(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get auth: %w", err) + } + + img, err := remote.Image(ref, remote.WithAuth(auth), remote.WithPlatform(platform)) + if err != nil { + return nil, fmt.Errorf("error pulling image: %w", err) + } + + return img, nil +} + +func (g 
*GCPArtifactsRegistry) getAuthToken(ctx context.Context) (*authn.Basic, error) { + authCfg := consts.DockerAuthConfig + if authCfg == "" { + return &gcpAuthConfig, nil + } + + decoded, err := base64.URLEncoding.DecodeString(authCfg) + if err != nil { + return nil, fmt.Errorf("failed to decode auth config: %w", err) + } + + var cfg struct { + Username string `json:"username"` + Password string `json:"password"` + } + + if err := json.Unmarshal(decoded, &cfg); err != nil { + return nil, fmt.Errorf("invalid JSON auth config: %w", err) + } + + return &authn.Basic{ + Username: cfg.Username, + Password: cfg.Password, + }, nil +} + +func (g *GCPArtifactsRegistry) getDockerImagePath(templateId string) string { + // DockerImagesURL is the URL to the docker images in the artifact registry + return fmt.Sprintf("projects/%s/locations/%s/repositories/%s/packages/%s", consts.GCPProject, consts.GCPRegion, consts.DockerRegistry, templateId) +} + +func (g *GCPArtifactsRegistry) getDockerImageTagPath(templateId string, buildId string) string { + return fmt.Sprintf("%s/tags/%s", g.getDockerImagePath(templateId), buildId) +} diff --git a/packages/shared/pkg/artifacts-registry/registry_local.go b/packages/shared/pkg/artifacts-registry/registry_local.go new file mode 100644 index 0000000..d43f2f2 --- /dev/null +++ b/packages/shared/pkg/artifacts-registry/registry_local.go @@ -0,0 +1,44 @@ +package artifacts_registry + +import ( + "context" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + containerregistry "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/daemon" +) + +type LocalArtifactsRegistry struct{} + +func NewLocalArtifactsRegistry() (*LocalArtifactsRegistry, error) { + return &LocalArtifactsRegistry{}, nil +} + +func (g *LocalArtifactsRegistry) Delete(ctx context.Context, templateId string, buildId string) error { + // for now, just assume local image can be deleted manually + return nil +} + +func (g 
*LocalArtifactsRegistry) GetTag(ctx context.Context, templateId string, buildId string) (string, error) { + return fmt.Sprintf("%s:%s", templateId, buildId), nil +} + +func (g *LocalArtifactsRegistry) GetImage(ctx context.Context, templateId string, buildId string, platform containerregistry.Platform) (containerregistry.Image, error) { + imageUrl, err := g.GetTag(ctx, templateId, buildId) + if err != nil { + return nil, fmt.Errorf("failed to get image URL: %w", err) + } + + ref, err := name.ParseReference(imageUrl) + if err != nil { + return nil, fmt.Errorf("invalid image reference: %w", err) + } + + img, err := daemon.Image(ref, daemon.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("failed to get image from local registry: %w", err) + } + + return img, nil +} diff --git a/packages/shared/pkg/chdb/clickhouse.go b/packages/shared/pkg/chdb/clickhouse.go new file mode 100644 index 0000000..d2669af --- /dev/null +++ b/packages/shared/pkg/chdb/clickhouse.go @@ -0,0 +1,86 @@ +package chdb + +import ( + "context" + "crypto/tls" + "fmt" + "time" + + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + + "github.com/e2b-dev/infra/packages/shared/pkg/models/chmodels" +) + +type Store interface { + Close() error + + // Base queries + Query(ctx context.Context, query string, args ...any) (driver.Rows, error) + Exec(ctx context.Context, query string, args ...any) error + + // Metrics queries + InsertMetrics(ctx context.Context, metrics chmodels.Metrics) error + QueryMetrics(ctx context.Context, sandboxID, teamID string, start int64, limit int) ([]chmodels.Metrics, error) +} + +type ClickHouseStore struct { + Conn driver.Conn +} + +func NewConn(config ClickHouseConfig) (driver.Conn, error) { + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate ClickHouse config: %w", err) + } + + var ( + ctx = context.Background() + conn, err = clickhouse.Open(&clickhouse.Options{ + Addr: 
[]string{config.ConnectionString}, + Protocol: clickhouse.Native, + TLS: &tls.Config{}, // NOTE(review): a non-nil empty tls.Config ENABLES TLS with default settings; use nil to disable TLS — confirm intent + Auth: clickhouse.Auth{ + Database: config.Database, + Username: config.Username, + Password: config.Password, + }, + DialTimeout: time.Second * 5, + ReadTimeout: time.Second * 90, + Debug: config.Debug, + Compression: &clickhouse.Compression{ + Method: clickhouse.CompressionLZ4, + }, + }) + ) + + if err != nil { + return nil, err + } + + if err := conn.Ping(ctx); err != nil { + return nil, err + } + + return conn, nil +} + +func NewStore(config ClickHouseConfig) (Store, error) { + conn, err := NewConn(config) + if err != nil { + return nil, err + } + + return &ClickHouseStore{conn}, nil +} + +func (c *ClickHouseStore) Close() error { + return c.Conn.Close() +} + +func (c *ClickHouseStore) Query(ctx context.Context, query string, args ...any) (driver.Rows, error) { + return c.Conn.Query(ctx, query, args...) +} + +func (c *ClickHouseStore) Exec(ctx context.Context, query string, args ...any) error { + return c.Conn.Exec(ctx, query, args...) 
+} diff --git a/packages/shared/pkg/chdb/config.go b/packages/shared/pkg/chdb/config.go new file mode 100644 index 0000000..0131246 --- /dev/null +++ b/packages/shared/pkg/chdb/config.go @@ -0,0 +1,27 @@ +package chdb + +import "fmt" + +type ClickHouseConfig struct { + ConnectionString string + Username string + Password string + Database string + Debug bool +} + +func (c ClickHouseConfig) Validate() error { + if c.ConnectionString == "" { + return fmt.Errorf("clickhouse connection string cannot be empty string") + } + + if c.Username == "" { + return fmt.Errorf("clickhouse username cannot be empty string") + } + + if c.Database == "" { + return fmt.Errorf("clickhouse database cannot be empty string") + } + + return nil +} diff --git a/packages/shared/pkg/chdb/metrics.go b/packages/shared/pkg/chdb/metrics.go new file mode 100644 index 0000000..cd5b0de --- /dev/null +++ b/packages/shared/pkg/chdb/metrics.go @@ -0,0 +1,43 @@ +package chdb + +import ( + "context" + "fmt" + + "github.com/e2b-dev/infra/packages/shared/pkg/models/chmodels" +) + +func (c *ClickHouseStore) InsertMetrics(ctx context.Context, metrics chmodels.Metrics) error { + batch, err := c.Conn.PrepareBatch(ctx, "INSERT INTO metrics") + if err != nil { + return err + } + err = batch.AppendStruct(&metrics) + if err != nil { + batch.Abort() + return fmt.Errorf("failed to append metrics struct to clickhouse batcher: %w", err) + } + + return batch.Send() +} + +func (c *ClickHouseStore) QueryMetrics(ctx context.Context, sandboxID, teamID string, start int64, limit int) ([]chmodels.Metrics, error) { + query := "SELECT * FROM metrics WHERE sandbox_id = (?) AND team_id = (?) AND timestamp >= (?) 
LIMIT (?)" + + rows, err := c.Query(ctx, query, sandboxID, teamID, start, limit) + if err != nil { + return nil, err + } + defer rows.Close() + + var metrics []chmodels.Metrics + for rows.Next() { + var metric chmodels.Metrics + if err := rows.ScanStruct(&metric); err != nil { + return nil, err + } + metrics = append(metrics, metric) + } + + return metrics, rows.Err() +} diff --git a/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.down.sql b/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.down.sql new file mode 100644 index 0000000..9b746c0 --- /dev/null +++ b/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS metrics; diff --git a/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.up.sql b/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.up.sql new file mode 100644 index 0000000..188fdf1 --- /dev/null +++ b/packages/shared/pkg/chdb/migrations/0001_create_metrics_table.up.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS metrics ( + timestamp DateTime('UTC'), + sandbox_id String, + team_id String, + cpu_count UInt32, + cpu_used_pct Float32, + mem_total_mib UInt64, + mem_used_mib UInt64 +) Engine MergeTree() + ORDER BY (timestamp) + PRIMARY KEY timestamp; \ No newline at end of file diff --git a/packages/shared/pkg/chdb/migrator.go b/packages/shared/pkg/chdb/migrator.go new file mode 100644 index 0000000..1af7220 --- /dev/null +++ b/packages/shared/pkg/chdb/migrator.go @@ -0,0 +1,119 @@ +package chdb + +import ( + "crypto/tls" + "embed" + "fmt" + + "github.com/ClickHouse/clickhouse-go/v2" + "github.com/golang-migrate/migrate/v4" + migch "github.com/golang-migrate/migrate/v4/database/clickhouse" + "github.com/golang-migrate/migrate/v4/source/iofs" +) + +// Thin wrapper around the migrate package to make it easier to use. 
+ +//go:embed migrations/*.sql +var migrationsFS embed.FS + +type ClickhouseMigrator struct { + m *migrate.Migrate +} + +func (chMig *ClickhouseMigrator) Up() error { + return chMig.m.Up() +} + +func (chMig *ClickhouseMigrator) Down() error { + return chMig.m.Down() +} + +func (chMig *ClickhouseMigrator) Version() (uint, bool, error) { + return chMig.m.Version() +} + +func (chMig *ClickhouseMigrator) To(version uint) error { + return chMig.m.Migrate(version) +} + +func (chMig *ClickhouseMigrator) Force(version int) error { + return chMig.m.Force(version) +} + +func (chMig *ClickhouseMigrator) List() ([]string, error) { + dirEntries, err := migrationsFS.ReadDir("migrations") + if err != nil { + return nil, err + } + + migrationFiles := make([]string, 0) + for _, entry := range dirEntries { + migrationFiles = append(migrationFiles, entry.Name()) + } + return migrationFiles, nil +} + +func (chMig *ClickhouseMigrator) Close() error { + err1, err2 := chMig.m.Close() + if err1 != nil || err2 != nil { + return fmt.Errorf("source close error: %v, driver close error: %v", err1, err2) + } + return nil +} + +func (chMig *ClickhouseMigrator) SetLogger(logger migrate.Logger) { + chMig.m.Log = logger +} + +func NewMigrator(config ClickHouseConfig) (*ClickhouseMigrator, error) { + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate ClickHouse config: %w", err) + } + + d, err := iofs.New(migrationsFS, "migrations") + if err != nil { + return nil, fmt.Errorf("failed to open Clickhouse migrations iofs: %w", err) + } + + db := clickhouse.OpenDB(&clickhouse.Options{ + Addr: []string{config.ConnectionString}, + Protocol: clickhouse.Native, + TLS: &tls.Config{}, // NOTE(review): a non-nil empty tls.Config ENABLES TLS with default settings; use nil to disable TLS — confirm intent + Auth: clickhouse.Auth{ + Database: config.Database, + Username: config.Username, + Password: config.Password, + }, + }) + + _, err = db.Exec(` + CREATE TABLE IF NOT EXISTS schema_migrations ( + version Int64, + dirty UInt8, + sequence UInt64 + ) + ENGINE = 
ReplicatedMergeTree + ORDER BY tuple(); + `) + if err != nil { + return nil, fmt.Errorf("failed to create schema_migrations table: %w", err) + } + + driver, err := migch.WithInstance(db, &migch.Config{ + DatabaseName: config.Database, + }) + if err != nil { + return nil, fmt.Errorf("failed to create clickhouse driver: %w", err) + } + + m, err := migrate.NewWithInstance( + "iofs", d, + "clickhouse", driver) + if err != nil { + return nil, fmt.Errorf("failed to create clickhouse migrate instance: %w", err) + } + + return &ClickhouseMigrator{ + m: m, + }, nil +} diff --git a/packages/shared/pkg/chdb/mock.go b/packages/shared/pkg/chdb/mock.go new file mode 100644 index 0000000..d16b991 --- /dev/null +++ b/packages/shared/pkg/chdb/mock.go @@ -0,0 +1,35 @@ +package chdb + +import ( + "context" + + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + + "github.com/e2b-dev/infra/packages/shared/pkg/models/chmodels" +) + +type MockStore struct{} + +func NewMockStore() *MockStore { + return &MockStore{} +} + +func (m *MockStore) Close() error { + return nil +} + +func (m *MockStore) Query(ctx context.Context, query string, args ...any) (driver.Rows, error) { + return nil, nil +} + +func (m *MockStore) Exec(ctx context.Context, query string, args ...any) error { + return nil +} + +func (m *MockStore) InsertMetrics(ctx context.Context, metrics chmodels.Metrics) error { + return nil +} + +func (m *MockStore) QueryMetrics(ctx context.Context, sandboxID, teamID string, start int64, limit int) ([]chmodels.Metrics, error) { + return nil, nil +} diff --git a/packages/shared/pkg/consts/aws.go b/packages/shared/pkg/consts/aws.go deleted file mode 100644 index 1b39096..0000000 --- a/packages/shared/pkg/consts/aws.go +++ /dev/null @@ -1,13 +0,0 @@ -package consts - -import ( - "os" -) - -var ( - AWSAccountID = os.Getenv("AWS_ACCOUNT_ID") - AWSRegion = os.Getenv("AWS_REGION") - ECRRepository = os.Getenv("AWS_ECR_REPOSITORY") - AWS_ACCESS_KEY_ID = os.Getenv("AWS_ACCESS_KEY_ID") - 
AWS_SECRET_ACCESS_KEY = os.Getenv("AWS_SECRET_ACCESS_KEY") -) diff --git a/packages/shared/pkg/consts/edge.go b/packages/shared/pkg/consts/edge.go new file mode 100644 index 0000000..da7d89e --- /dev/null +++ b/packages/shared/pkg/consts/edge.go @@ -0,0 +1,7 @@ +package consts + +const ( + EdgeApiAuthHeader = "X-API-Key" + EdgeRpcAuthHeader = "authorization" + EdgeRpcServiceInstanceIDHeader = "service-instance-id" +) diff --git a/packages/shared/pkg/consts/envd.go b/packages/shared/pkg/consts/envd.go index 593c461..e4c8f41 100644 --- a/packages/shared/pkg/consts/envd.go +++ b/packages/shared/pkg/consts/envd.go @@ -2,5 +2,4 @@ package consts const ( DefaultEnvdServerPort int64 = 49983 - OldEnvdServerPort int64 = 49982 ) diff --git a/packages/shared/pkg/consts/gcp.go b/packages/shared/pkg/consts/gcp.go index 1196a05..171b2fd 100644 --- a/packages/shared/pkg/consts/gcp.go +++ b/packages/shared/pkg/consts/gcp.go @@ -11,6 +11,7 @@ var ( Domain = os.Getenv("DOMAIN_NAME") DockerRegistry = os.Getenv("GCP_DOCKER_REPOSITORY_NAME") GoogleServiceAccountSecret = os.Getenv("GOOGLE_SERVICE_ACCOUNT_BASE64") + DockerAuthConfig = os.Getenv("DOCKER_AUTH_BASE64") GCPRegion = os.Getenv("GCP_REGION") ) diff --git a/packages/shared/pkg/db/apiKeys.go b/packages/shared/pkg/db/apiKeys.go index 8dc87d4..b0da8f8 100644 --- a/packages/shared/pkg/db/apiKeys.go +++ b/packages/shared/pkg/db/apiKeys.go @@ -11,6 +11,34 @@ import ( "github.com/e2b-dev/infra/packages/shared/pkg/models/teamapikey" ) +type TeamForbiddenError struct { + message string +} + +func (e *TeamForbiddenError) Error() string { + return e.message +} + +type TeamBlockedError struct { + message string +} + +func (e *TeamBlockedError) Error() string { + return e.message +} + +func validateTeamUsage(team *models.Team) error { + if team.IsBanned { + return &TeamForbiddenError{message: "team is banned"} + } + + if team.IsBlocked { + return &TeamBlockedError{message: "team is blocked"} + } + + return nil +} + func (db *DB) 
GetTeamAuth(ctx context.Context, apiKey string) (*models.Team, *models.Tier, error) { result, err := db. Client. @@ -21,31 +49,17 @@ func (db *DB) GetTeamAuth(ctx context.Context, apiKey string) (*models.Team, *mo QueryTeam(). WithTeamTier(). Only(ctx) - if err != nil { errMsg := fmt.Errorf("failed to get team from API key: %w", err) return nil, nil, errMsg } - // - if result.IsBanned { - errMsg := fmt.Errorf("team is banned") - return nil, nil, errMsg + err = validateTeamUsage(result) + if err != nil { + return nil, nil, err } - // - if result.IsBlocked { - if result.BlockedReason == nil { - errMsg := fmt.Errorf("team was blocked") - return nil, nil, errMsg - } - - errMsg := fmt.Errorf("team was blocked - %s", *result.BlockedReason) - - return nil, nil, errMsg - } - // return result, result.Edges.TeamTier, nil } @@ -54,9 +68,8 @@ func (db *DB) GetUserID(ctx context.Context, token string) (*uuid.UUID, error) { Client. AccessToken. Query(). - Where(accesstoken.ID(token)). + Where(accesstoken.AccessToken(token)). Only(ctx) - if err != nil { errMsg := fmt.Errorf("failed to get user from access token: %w", err) @@ -65,3 +78,19 @@ func (db *DB) GetUserID(ctx context.Context, token string) (*uuid.UUID, error) { return &result.UserID, nil } + +func (db *DB) GetTeamAPIKeys(ctx context.Context, teamID uuid.UUID) ([]*models.TeamAPIKey, error) { + result, err := db. + Client. + TeamAPIKey. + Query(). + Where(teamapikey.TeamID(teamID)). 
+ All(ctx) + if err != nil { + errMsg := fmt.Errorf("failed to get team API keys: %w", err) + + return nil, errMsg + } + + return result, nil +} diff --git a/packages/shared/pkg/db/auth.go b/packages/shared/pkg/db/auth.go new file mode 100644 index 0000000..6922c4d --- /dev/null +++ b/packages/shared/pkg/db/auth.go @@ -0,0 +1,46 @@ +package db + +import ( + "context" + "fmt" + + "github.com/google/uuid" + + "github.com/e2b-dev/infra/packages/shared/pkg/models" + "github.com/e2b-dev/infra/packages/shared/pkg/models/team" + "github.com/e2b-dev/infra/packages/shared/pkg/models/usersteams" +) + +func (db *DB) GetTeamByIDAndUserIDAuth(ctx context.Context, teamID string, userID uuid.UUID) (*models.Team, *models.Tier, error) { + teamIDParsed, err := uuid.Parse(teamID) + if err != nil { + errMsg := fmt.Errorf("failed to parse team ID: %w", err) + + return nil, nil, errMsg + } + + result, err := db. + Client. + Team. + Query(). + Where( + team.ID(teamIDParsed), + team.HasUsersTeamsWith( + usersteams.UserID(userID), + ), + ). + WithTeamTier(). 
+ Only(ctx) + if err != nil { + errMsg := fmt.Errorf("failed to get team from teamID and userID key: %w", err) + + return nil, nil, errMsg + } + + err = validateTeamUsage(result) + if err != nil { + return nil, nil, err + } + + return result, result.Edges.TeamTier, nil +} diff --git a/packages/shared/pkg/db/client.go b/packages/shared/pkg/db/client.go index 23e99b2..57a5fee 100644 --- a/packages/shared/pkg/db/client.go +++ b/packages/shared/pkg/db/client.go @@ -3,6 +3,7 @@ package db import ( "fmt" "os" + "time" "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" @@ -15,9 +16,9 @@ type DB struct { Client *models.Client } -var databaseURL = os.Getenv("CFNDBURL") +var databaseURL = os.Getenv("POSTGRES_CONNECTION_STRING") -func NewClient() (*DB, error) { +func NewClient(maxConns, maxIdle int) (*DB, error) { if databaseURL == "" { return nil, fmt.Errorf("database URL is empty") } @@ -29,7 +30,9 @@ func NewClient() (*DB, error) { // Get the underlying sql.DB object of the driver. db := drv.DB() - db.SetMaxOpenConns(100) + db.SetMaxOpenConns(maxConns) + db.SetMaxIdleConns(maxIdle) + db.SetConnMaxLifetime(time.Minute * 30) client := models.NewClient(models.Driver(drv)) @@ -38,4 +41,4 @@ func NewClient() (*DB, error) { func (db *DB) Close() error { return db.Client.Close() -} \ No newline at end of file +} diff --git a/packages/shared/pkg/db/env_aliases.go b/packages/shared/pkg/db/env_aliases.go index d0a57ff..5b20c9c 100644 --- a/packages/shared/pkg/db/env_aliases.go +++ b/packages/shared/pkg/db/env_aliases.go @@ -14,7 +14,6 @@ func (db *DB) DeleteEnvAlias(ctx context.Context, alias string) error { EnvAlias. DeleteOneID(alias). Exec(ctx) - if err != nil { errMsg := fmt.Errorf("failed to delete env alias '%s': %w", alias, err) @@ -32,7 +31,6 @@ func (db *DB) reserveEnvAlias(ctx context.Context, envID, alias string) error { SetID(alias). SetEnvID(envID). 
Exec(ctx) - if err != nil { errMsg := fmt.Errorf("failed to reserve env alias '%s': %w", alias, err) @@ -65,7 +63,6 @@ func (db *DB) UpdateEnvAlias(ctx context.Context, alias, envID string) error { envalias.EnvID(envID), envalias.IsRenamable(true)). Exec(ctx) - if err != nil { errMsg := fmt.Errorf("failed to delete env alias '%s' for env '%s': %w", alias, envID, err) @@ -78,7 +75,6 @@ func (db *DB) UpdateEnvAlias(ctx context.Context, alias, envID string) error { SetID(alias). SetEnvID(envID). Exec(ctx) - if err != nil { errMsg := fmt.Errorf("failed to update env alias '%s' for env '%s': %w", alias, envID, err) @@ -86,7 +82,6 @@ func (db *DB) UpdateEnvAlias(ctx context.Context, alias, envID string) error { } err = tx.Commit() - if err != nil { errMsg := fmt.Errorf("committing transaction: %w", err) diff --git a/packages/shared/pkg/db/envs.go b/packages/shared/pkg/db/envs.go index 2a20672..a4665ba 100644 --- a/packages/shared/pkg/db/envs.go +++ b/packages/shared/pkg/db/envs.go @@ -112,8 +112,8 @@ func (db *DB) GetEnvs(ctx context.Context, teamID uuid.UUID) (result []*Template return result, nil } -func (db *DB) GetEnv(ctx context.Context, aliasOrEnvID string) (result *Template, build *models.EnvBuild, err error) { - dbEnv, err := db. +func (db *DB) GetEnv(ctx context.Context, aliasOrEnvID string) (result *models.Env, err error) { + template, err := db. Client. Env. Query(). @@ -122,43 +122,52 @@ func (db *DB) GetEnv(ctx context.Context, aliasOrEnvID string) (result *Template env.HasEnvAliasesWith(envalias.ID(aliasOrEnvID)), env.ID(aliasOrEnvID), ), - env.HasBuildsWith(envbuild.StatusEQ(envbuild.StatusUploaded)), ). WithEnvAliases(func(query *models.EnvAliasQuery) { query.Order(models.Asc(envalias.FieldID)) // TODO: remove once we have only 1 alias per env - }). 
- WithBuilds(func(query *models.EnvBuildQuery) { - query.Where(envbuild.StatusEQ(envbuild.StatusUploaded)).Order(models.Desc(envbuild.FieldFinishedAt)).Limit(1) }).Only(ctx) notFound := models.IsNotFound(err) if notFound { - return nil, nil, fmt.Errorf("template '%s' not found: %w", aliasOrEnvID, err) + return nil, TemplateNotFound{} } else if err != nil { - return nil, nil, fmt.Errorf("failed to get env '%s': %w", aliasOrEnvID, err) + return nil, fmt.Errorf("failed to get template '%s': %w", aliasOrEnvID, err) } - aliases := make([]string, len(dbEnv.Edges.EnvAliases)) - for i, alias := range dbEnv.Edges.EnvAliases { - aliases[i] = alias.ID + return template, nil +} + +func (db *DB) GetRunningEnvBuilds(ctx context.Context) ([]*models.EnvBuild, error) { + envBuilds, err := db. + Client. + EnvBuild. + Query(). + Where(envbuild.StatusIn(envbuild.StatusWaiting, envbuild.StatusBuilding)). + Order(models.Desc(envbuild.FieldCreatedAt)). + All(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get running env builds: %w", err) + } + + return envBuilds, nil +} + +func (db *DB) GetEnvBuild(ctx context.Context, buildID uuid.UUID) (build *models.EnvBuild, err error) { + dbBuild, err := db. + Client. + EnvBuild. + Query(). + Where(envbuild.ID(buildID)). 
+ First(ctx) + + notFound := models.IsNotFound(err) + if notFound { + return nil, TemplateBuildNotFound{} + } else if err != nil { + return nil, fmt.Errorf("failed to get env build '%s': %w", buildID, err) } - build = dbEnv.Edges.Builds[0] - return &Template{ - TemplateID: dbEnv.ID, - BuildID: build.ID.String(), - VCPU: build.Vcpu, - RAMMB: build.RAMMB, - DiskMB: build.FreeDiskSizeMB, - Public: dbEnv.Public, - Aliases: &aliases, - TeamID: dbEnv.TeamID, - CreatedAt: dbEnv.CreatedAt, - UpdatedAt: dbEnv.UpdatedAt, - LastSpawnedAt: dbEnv.LastSpawnedAt, - SpawnCount: dbEnv.SpawnCount, - BuildCount: dbEnv.BuildCount, - }, build, nil + return dbBuild, nil } func (db *DB) CheckBaseEnvHasSnapshots(ctx context.Context, envID string) (result bool, err error) { @@ -184,7 +193,7 @@ func (db *DB) FinishEnvBuild( SetEnvdVersion(envdVersion). Exec(ctx) if err != nil { - return fmt.Errorf("failed to finish env build '%s': %w", buildID, err) + return fmt.Errorf("failed to finish template build '%s': %w", buildID, err) } return nil @@ -199,7 +208,7 @@ func (db *DB) EnvBuildSetStatus( err := db.Client.EnvBuild.Update().Where(envbuild.ID(buildID), envbuild.EnvID(envID)). 
SetStatus(status).SetFinishedAt(time.Now()).Exec(ctx) if err != nil { - return fmt.Errorf("failed to set env build status %s for '%s': %w", status, buildID, err) + return fmt.Errorf("failed to set template build status %s for '%s': %w", status, buildID, err) } return nil diff --git a/packages/shared/pkg/db/errors.go b/packages/shared/pkg/db/errors.go new file mode 100644 index 0000000..3745ca6 --- /dev/null +++ b/packages/shared/pkg/db/errors.go @@ -0,0 +1,33 @@ +package db + +type ErrNotFound error + +type TemplateNotFound struct{ ErrNotFound } + +func (TemplateNotFound) Error() string { + return "Template not found" +} + +type TemplateBuildNotFound struct{ ErrNotFound } + +func (TemplateBuildNotFound) Error() string { + return "Template build not found" +} + +type SnapshotNotFound struct{ ErrNotFound } + +func (SnapshotNotFound) Error() string { + return "Snapshot not found" +} + +type BuildNotFound struct{ ErrNotFound } + +func (BuildNotFound) Error() string { + return "Build not found" +} + +type EnvNotFound struct{ ErrNotFound } + +func (EnvNotFound) Error() string { + return "Env not found" +} diff --git a/packages/shared/pkg/db/snapshot.go b/packages/shared/pkg/db/snapshot.go index dcab7d1..f1eedbd 100644 --- a/packages/shared/pkg/db/snapshot.go +++ b/packages/shared/pkg/db/snapshot.go @@ -3,18 +3,20 @@ package db import ( "context" "fmt" + "time" + + "github.com/google/uuid" "github.com/e2b-dev/infra/packages/shared/pkg/id" "github.com/e2b-dev/infra/packages/shared/pkg/models" "github.com/e2b-dev/infra/packages/shared/pkg/models/env" "github.com/e2b-dev/infra/packages/shared/pkg/models/envbuild" "github.com/e2b-dev/infra/packages/shared/pkg/models/snapshot" - - "github.com/google/uuid" ) type SnapshotInfo struct { SandboxID string + SandboxStartedAt time.Time BaseTemplateID string VCPU int64 RAMMB int64 @@ -23,6 +25,7 @@ type SnapshotInfo struct { KernelVersion string FirecrackerVersion string EnvdVersion string + EnvdSecured bool } // Check if there exists 
snapshot with the ID, if yes then return a new @@ -33,6 +36,9 @@ func (db *DB) NewSnapshotBuild( teamID uuid.UUID, ) (*models.EnvBuild, error) { tx, err := db.Client.BeginTx(ctx, nil) + if err != nil { + return nil, fmt.Errorf("failed to start transaction: %w", err) + } defer tx.Rollback() s, err := tx. @@ -68,25 +74,28 @@ func (db *DB) NewSnapshotBuild( return nil, fmt.Errorf("failed to create env '%s': %w", snapshotConfig.SandboxID, err) } - s, err = tx. + err = tx. Snapshot. Create(). SetSandboxID(snapshotConfig.SandboxID). SetBaseEnvID(snapshotConfig.BaseTemplateID). SetEnv(e). SetMetadata(snapshotConfig.Metadata). - Save(ctx) + SetSandboxStartedAt(snapshotConfig.SandboxStartedAt). + SetEnvSecure(snapshotConfig.EnvdSecured). + Exec(ctx) if err != nil { return nil, fmt.Errorf("failed to create snapshot '%s': %w", snapshotConfig.SandboxID, err) } } else { e = s.Edges.Env // Update existing snapshot with new metadata and pause time - s, err = tx. + err = tx. Snapshot. UpdateOne(s). SetMetadata(snapshotConfig.Metadata). - Save(ctx) + SetSandboxStartedAt(snapshotConfig.SandboxStartedAt). + Exec(ctx) if err != nil { return nil, fmt.Errorf("failed to update snapshot '%s': %w", snapshotConfig.SandboxID, err) } @@ -102,7 +111,7 @@ func (db *DB) NewSnapshotBuild( SetKernelVersion(snapshotConfig.KernelVersion). SetFirecrackerVersion(snapshotConfig.FirecrackerVersion). SetEnvdVersion(snapshotConfig.EnvdVersion). - SetStatus(envbuild.StatusBuilding). + SetStatus(envbuild.StatusSnapshotting). SetTotalDiskSizeMB(snapshotConfig.TotalDiskSizeMB). Save(ctx) if err != nil { @@ -117,40 +126,6 @@ func (db *DB) NewSnapshotBuild( return b, nil } -func (db *DB) GetLastSnapshot(ctx context.Context, sandboxID string, teamID uuid.UUID) ( - *models.Snapshot, - *models.EnvBuild, - error, -) { - e, err := db. - Client. - Env. - Query(). - Where( - env.HasBuildsWith(envbuild.StatusEQ(envbuild.StatusSuccess)), - env.HasSnapshotsWith(snapshot.SandboxID(sandboxID)), - env.TeamID(teamID), - ). 
- WithSnapshots(func(query *models.SnapshotQuery) { - query.Where(snapshot.SandboxID(sandboxID)).Only(ctx) - }). - WithBuilds(func(query *models.EnvBuildQuery) { - query.Where(envbuild.StatusEQ(envbuild.StatusSuccess)).Order(models.Desc(envbuild.FieldFinishedAt)).Only(ctx) - }).Only(ctx) - - notFound := models.IsNotFound(err) - - if notFound { - return nil, nil, fmt.Errorf("no snapshot build found for '%s'", sandboxID) - } - - if err != nil { - return nil, nil, fmt.Errorf("failed to get snapshot build for '%s': %w", sandboxID, err) - } - - return e.Edges.Snapshots[0], e.Edges.Builds[0], nil -} - func (db *DB) GetSnapshotBuilds(ctx context.Context, sandboxID string, teamID uuid.UUID) ( *models.Env, []*models.EnvBuild, @@ -164,20 +139,17 @@ func (db *DB) GetSnapshotBuilds(ctx context.Context, sandboxID string, teamID uu env.HasSnapshotsWith(snapshot.SandboxID(sandboxID)), env.TeamID(teamID), ). - WithSnapshots(func(query *models.SnapshotQuery) { - query.Where(snapshot.SandboxID(sandboxID)).Only(ctx) - }). WithBuilds(). 
Only(ctx) notFound := models.IsNotFound(err) - if err != nil { - return nil, nil, fmt.Errorf("failed to get snapshot build for '%s': %w", sandboxID, err) + if notFound { + return nil, nil, EnvNotFound{} } - if notFound { - return nil, nil, fmt.Errorf("no snapshot build found for '%s'", sandboxID) + if err != nil { + return nil, nil, fmt.Errorf("failed to get snapshot build for '%s': %w", sandboxID, err) } return e, e.Edges.Builds, nil diff --git a/packages/shared/pkg/db/users.go b/packages/shared/pkg/db/users.go deleted file mode 100644 index 6f88a8e..0000000 --- a/packages/shared/pkg/db/users.go +++ /dev/null @@ -1,34 +0,0 @@ -package db - -import ( - "context" - "fmt" - - "github.com/google/uuid" - - "github.com/e2b-dev/infra/packages/shared/pkg/models" - "github.com/e2b-dev/infra/packages/shared/pkg/models/team" - "github.com/e2b-dev/infra/packages/shared/pkg/models/user" - "github.com/e2b-dev/infra/packages/shared/pkg/models/usersteams" -) - -func (db *DB) GetTeams(ctx context.Context, userID uuid.UUID) ([]*models.Team, error) { - t, err := db. - Client. - Team. - Query(). - Where(team.HasUsersWith(user.ID(userID))). - WithTeamTier(). - WithUsersTeams(func(query *models.UsersTeamsQuery) { - query.Where(usersteams.UserID(userID)) - }). 
- All(ctx) - - if err != nil { - errMsg := fmt.Errorf("failed to get default team from user: %w", err) - - return nil, errMsg - } - - return t, nil -} diff --git a/packages/shared/pkg/env/env.go b/packages/shared/pkg/env/env.go index 708877e..17b36c3 100644 --- a/packages/shared/pkg/env/env.go +++ b/packages/shared/pkg/env/env.go @@ -1,17 +1,24 @@ package env -import "os" +import ( + "os" + "strconv" +) -var environment = GetEnv("ENVIRONMENT", "local") - -func IsProduction() bool { - return environment == "prod" -} +var environment = GetEnv("ENVIRONMENT", "prod") func IsLocal() bool { return environment == "local" } +func IsDevelopment() bool { + return environment == "dev" || environment == "local" +} + +func IsDebug() bool { + return GetEnv("E2B_DEBUG", "false") == "true" +} + func GetEnv(key, defaultValue string) string { value := os.Getenv(key) if len(value) == 0 { @@ -19,3 +26,16 @@ func GetEnv(key, defaultValue string) string { } return value } + +func GetEnvAsInt(key string, defaultValue int) (int, error) { + if v := os.Getenv(key); v != "" { + value, err := strconv.Atoi(v) + if err != nil { + return defaultValue, err + } + + return value, nil + } + + return defaultValue, nil +} diff --git a/packages/shared/pkg/feature-flags/client.go b/packages/shared/pkg/feature-flags/client.go new file mode 100644 index 0000000..f4ae66b --- /dev/null +++ b/packages/shared/pkg/feature-flags/client.go @@ -0,0 +1,58 @@ +package feature_flags + +import ( + "context" + "os" + "time" + + ldclient "github.com/launchdarkly/go-server-sdk/v7" + "github.com/launchdarkly/go-server-sdk/v7/testhelpers/ldtestdata" + "go.uber.org/zap" +) + +// LaunchDarklyOfflineStore is a test fixture that provides dynamically updatable feature flag state +var LaunchDarklyOfflineStore = ldtestdata.DataSource() + +var launchDarklyApiKey = os.Getenv("LAUNCH_DARKLY_API_KEY") + +const waitForInit = 5 * time.Second + +type Client struct { + Ld *ldclient.LDClient +} + +func NewClient() (*Client, error) { + 
var ldClient *ldclient.LDClient
+	var err error
+
+	if launchDarklyApiKey == "" {
+		// waitFor has to be 0 for offline store
+		ldClient, err = ldclient.MakeCustomClient("", ldclient.Config{DataSource: LaunchDarklyOfflineStore}, 0)
+		if err != nil {
+			return nil, err
+		}
+
+		return &Client{Ld: ldClient}, nil
+	}
+
+	ldClient, err = ldclient.MakeClient(launchDarklyApiKey, waitForInit)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{Ld: ldClient}, nil
+}
+
+func (c *Client) Close(ctx context.Context) error {
+	if c.Ld == nil {
+		return nil
+	}
+
+	err := c.Ld.Close()
+	if err != nil {
+		zap.L().Error("Error during launch-darkly client shutdown", zap.Error(err))
+		return err
+	}
+
+	return nil
+}
diff --git a/packages/shared/pkg/feature-flags/client_test.go b/packages/shared/pkg/feature-flags/client_test.go
new file mode 100644
index 0000000..08d4c8d
--- /dev/null
+++ b/packages/shared/pkg/feature-flags/client_test.go
@@ -0,0 +1,35 @@
+package feature_flags
+
+import (
+	"context"
+	"testing"
+
+	"github.com/launchdarkly/go-sdk-common/v3/ldcontext"
+	"github.com/stretchr/testify/assert"
+)
+
+const (
+	flagName = "demo-feature-flag"
+)
+
+func TestOfflineDatastore(t *testing.T) {
+	clientCtx := ldcontext.NewBuilder(flagName).Build()
+	client, err := NewClient()
+	assert.NoError(t, err)
+	defer func() {
+		_ = client.Close(context.Background())
+	}()
+
+	// value is not set so it should be default (false)
+	flagValue, _ := client.Ld.BoolVariation(flagName, clientCtx, false)
+	assert.False(t, flagValue)
+
+	LaunchDarklyOfflineStore.Update(
+		LaunchDarklyOfflineStore.Flag(flagName).VariationForAll(true),
+	)
+
+	// value is set manually in datastore and should be taken from there
+	flagValue, _ = client.Ld.BoolVariation(flagName, clientCtx, false)
+	assert.True(t, flagValue)
+}
diff --git a/packages/shared/pkg/feature-flags/flags.go b/packages/shared/pkg/feature-flags/flags.go
new file mode 100644
index 0000000..ef96089
--- /dev/null
+++ 
b/packages/shared/pkg/feature-flags/flags.go @@ -0,0 +1,8 @@ +package feature_flags + +// Flag for enabling writing metrics for sandbox +// https://app.launchdarkly.com/projects/default/flags/sandbox-metrics-write +const ( + MetricsWriteFlagName = "sandbox-metrics-write" + MetricsWriteDefault = false +) diff --git a/packages/shared/pkg/gin_utils/middleware/exclude.go b/packages/shared/pkg/gin_utils/middleware/exclude.go deleted file mode 100644 index 3d5a32f..0000000 --- a/packages/shared/pkg/gin_utils/middleware/exclude.go +++ /dev/null @@ -1,67 +0,0 @@ -package middleware - -import ( - "strings" - - "github.com/gin-gonic/gin" -) - -func ExcludeRoutes(middleware gin.HandlerFunc, notlogged ...string) gin.HandlerFunc { - return func(c *gin.Context) { - path := c.Request.URL.Path - - if !shouldSkip(path, notlogged) { - middleware(c) - } else { - c.Next() - } - } -} - -func IncludeRoutes(middleware gin.HandlerFunc, included ...string) gin.HandlerFunc { - return func(c *gin.Context) { - path := c.Request.URL.Path - - if shouldInclude(path, included) { - middleware(c) - } else { - c.Next() - } - } -} - -func shouldInclude(path string, patterns []string) bool { - for _, pattern := range patterns { - if matchPattern(path, pattern) { - return true - } - } - return false -} - -func shouldSkip(path string, patterns []string) bool { - for _, pattern := range patterns { - if matchPattern(path, pattern) { - return true - } - } - - return false -} - -func matchPattern(path, pattern string) bool { - pathSegments := strings.Split(path, "/") - patternSegments := strings.Split(pattern, "/") - - if len(pathSegments) != len(patternSegments) { - return false - } - - for i := range pathSegments { - if patternSegments[i] != pathSegments[i] && !strings.HasPrefix(patternSegments[i], ":") { - return false - } - } - - return true -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/config.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/config.go deleted file mode 
100644 index e1ab407..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/config.go +++ /dev/null @@ -1,43 +0,0 @@ -package metrics - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" -) - -type config struct { - recordInFlight bool - recordSize bool - recordDuration bool - groupedStatus bool - recorder Recorder - attributes func(serverName, route string, request *http.Request) []attribute.KeyValue - shouldRecord func(serverName, route string, request *http.Request) bool -} - -func defaultConfig() *config { - return &config{ - recordInFlight: true, - recordDuration: true, - recordSize: true, - groupedStatus: true, - attributes: DefaultAttributes, - shouldRecord: func(_, _ string, _ *http.Request) bool { - return true - }, - } -} - -var DefaultAttributes = func(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - semconv.HTTPMethodKey.String(request.Method), - } - - if route != "" { - attrs = append(attrs, semconv.HTTPRouteKey.String(route)) - } - - return attrs -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/middleware.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/middleware.go deleted file mode 100644 index 9dbbcc9..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/middleware.go +++ /dev/null @@ -1,60 +0,0 @@ -package metrics - -import ( - "time" - - "github.com/gin-gonic/gin" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" -) - -// Middleware returns middleware that will trace incoming requests. -// The service parameter should describe the name of the (virtual) -// server handling the request. 
-func Middleware(service string, options ...Option) gin.HandlerFunc { - cfg := defaultConfig() - for _, option := range options { - option.apply(cfg) - } - - recorder := cfg.recorder - if recorder == nil { - recorder = GetRecorder(service) - } - - return func(ginCtx *gin.Context) { - ctx := ginCtx.Request.Context() - - route := ginCtx.FullPath() - if len(route) <= 0 { - route = "nonconfigured" - } - - if !cfg.shouldRecord(service, route, ginCtx.Request) { - ginCtx.Next() - - return - } - - start := time.Now() - reqAttributes := cfg.attributes(service, route, ginCtx.Request) - - defer func() { - resAttributes := append( - reqAttributes[0:0], - reqAttributes..., - ) - - if cfg.groupedStatus { - code := int(ginCtx.Writer.Status()/100) * 100 - resAttributes = append(resAttributes, semconv.HTTPStatusCodeKey.Int(code)) - } else { - resAttributes = append(resAttributes, semconv.HTTPAttributesFromHTTPStatusCode(ginCtx.Writer.Status())...) - } - - duration := time.Since(start) - recorder.ObserveHTTPRequestDuration(ctx, duration, resAttributes) - }() - - ginCtx.Next() - } -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/option.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/option.go deleted file mode 100644 index f01478d..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/option.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" -) - -// Option applies a configuration to the given config -type Option interface { - apply(cfg *config) -} - -type optionFunc func(cfg *config) - -func (fn optionFunc) apply(cfg *config) { - fn(cfg) -} - -// WithAttributes sets a func using which what attributes to be recorded can be specified. 
-// By default the DefaultAttributes is used -func WithAttributes(attributes func(serverName, route string, request *http.Request) []attribute.KeyValue) Option { - return optionFunc(func(cfg *config) { - cfg.attributes = attributes - }) -} - -// WithRecordInFlight determines whether to record In Flight Requests or not -// By default the recordInFlight is true -func WithRecordInFlightDisabled() Option { - return optionFunc(func(cfg *config) { - cfg.recordInFlight = false - }) -} - -// WithRecordDuration determines whether to record Duration of Requests or not -// By default the recordDuration is true -func WithRecordDurationDisabled() Option { - return optionFunc(func(cfg *config) { - cfg.recordDuration = false - }) -} - -// WithRecordSize determines whether to record Size of Requests and Responses or not -// By default the recordSize is true -func WithRecordSizeDisabled() Option { - return optionFunc(func(cfg *config) { - cfg.recordSize = false - }) -} - -// WithGroupedStatus determines whether to group the response status codes or not. 
If true 2xx, 3xx will be stored -// By default the groupedStatus is true -func WithGroupedStatusDisabled() Option { - return optionFunc(func(cfg *config) { - cfg.groupedStatus = false - }) -} - -// WithRecorder sets a recorder for recording requests -// By default the open telemetry recorder is used -func WithRecorder(recorder Recorder) Option { - return optionFunc(func(cfg *config) { - cfg.recorder = recorder - }) -} - -// WithShouldRecordFunc sets a func using which whether a record should be recorded -// By default the all api calls are recorded -func WithShouldRecordFunc(shouldRecord func(serverName, route string, request *http.Request) bool) Option { - return optionFunc(func(cfg *config) { - cfg.shouldRecord = shouldRecord - }) -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/otelrecorder.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/otelrecorder.go deleted file mode 100644 index dd5e39d..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/otelrecorder.go +++ /dev/null @@ -1,44 +0,0 @@ -package metrics - -import ( - "context" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" -) - -// Recorder knows how to record and measure the metrics. This -// has the required methods to be used with the HTTP -// middlewares. -type otelRecorder struct { - totalDuration metric.Float64Histogram -} - -func GetRecorder(metricsPrefix string) Recorder { - metricName := func(metricName string) string { - if len(metricsPrefix) > 0 { - return metricsPrefix + "." 
+ metricName - } - - return metricName - } - - meter := otel.Meter("api-metrics", metric.WithInstrumentationVersion(SemVersion())) - - totalDuration, _ := meter.Float64Histogram( - metricName("http.server.duration"), - metric.WithDescription("Time Taken by request"), - metric.WithUnit("ms"), - ) - - return &otelRecorder{ - totalDuration: totalDuration, - } -} - -// ObserveHTTPRequestDuration measures the duration of an HTTP request. -func (r *otelRecorder) ObserveHTTPRequestDuration(ctx context.Context, duration time.Duration, attributes []attribute.KeyValue) { - r.totalDuration.Record(ctx, float64(duration/time.Millisecond), metric.WithAttributes(attributes...)) -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/recorder.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/recorder.go deleted file mode 100644 index fe0d46b..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/recorder.go +++ /dev/null @@ -1,13 +0,0 @@ -package metrics - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -type Recorder interface { - // ObserveHTTPRequestDuration measures the duration of an HTTP request. - ObserveHTTPRequestDuration(ctx context.Context, duration time.Duration, attributes []attribute.KeyValue) -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/metrics/version.go b/packages/shared/pkg/gin_utils/middleware/otel/metrics/version.go deleted file mode 100644 index f5025b0..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/metrics/version.go +++ /dev/null @@ -1,11 +0,0 @@ -package metrics - -// Version is the current release version of the gin instrumentation. -func Version() string { - return "1.0.0" -} - -// SemVersion is the semantic version to be supplied to tracer/meter creation. 
-func SemVersion() string { - return "semver:" + Version() -} diff --git a/packages/shared/pkg/gin_utils/middleware/otel/tracing/middleware.go b/packages/shared/pkg/gin_utils/middleware/otel/tracing/middleware.go deleted file mode 100644 index 7ef9646..0000000 --- a/packages/shared/pkg/gin_utils/middleware/otel/tracing/middleware.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Based on https://github.com/DataDog/dd-trace-go/blob/8fb554ff7cf694267f9077ae35e27ce4689ed8b6/contrib/gin-gonic/gin/gintrace.go - -package tracing // import "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" - -import ( - "fmt" - - "github.com/gin-gonic/gin" - "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" - oteltrace "go.opentelemetry.io/otel/trace" -) - -const ( - tracerKey = "otel-go-contrib-tracer" - tracerName = "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" -) - -type config struct { - TracerProvider oteltrace.TracerProvider - Propagators propagation.TextMapPropagator -} - -// Middleware returns middleware that will trace incoming requests. -// The service parameter should describe the name of the (virtual) -// server handling the request. 
-func Middleware(service string) gin.HandlerFunc { - cfg := config{} - if cfg.TracerProvider == nil { - cfg.TracerProvider = otel.GetTracerProvider() - } - tracer := cfg.TracerProvider.Tracer( - tracerName, - oteltrace.WithInstrumentationVersion(otelgin.Version()), - ) - if cfg.Propagators == nil { - cfg.Propagators = otel.GetTextMapPropagator() - } - return func(c *gin.Context) { - c.Set(tracerKey, tracer) - savedCtx := c.Request.Context() - defer func() { - c.Request = c.Request.WithContext(savedCtx) - }() - ctx := cfg.Propagators.Extract(savedCtx, propagation.HeaderCarrier(c.Request.Header)) - opts := []oteltrace.SpanStartOption{ - oteltrace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", c.Request)...), - oteltrace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(c.Request)...), - oteltrace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(service, c.FullPath(), c.Request)...), - oteltrace.WithSpanKind(oteltrace.SpanKindServer), - } - spanName := c.FullPath() - if spanName == "" { - spanName = fmt.Sprintf("HTTP %s route not found", c.Request.Method) - } - ctx, span := tracer.Start(ctx, spanName, opts...) - defer span.End() - - // pass the span through the request context - c.Request = c.Request.WithContext(ctx) - - // serve the request to the next middleware - c.Next() - - status := c.Writer.Status() - attrs := semconv.HTTPAttributesFromHTTPStatusCode(status) - spanStatus, spanMessage := semconv.SpanStatusFromHTTPStatusCode(status) - span.SetAttributes(attrs...) 
- span.SetStatus(spanStatus, spanMessage) - if len(c.Errors) > 0 { - span.SetAttributes(attribute.String("gin.errors", c.Errors.String())) - } - } -} diff --git a/packages/shared/pkg/grpc/connection.go b/packages/shared/pkg/grpc/connection.go deleted file mode 100644 index 6b769a8..0000000 --- a/packages/shared/pkg/grpc/connection.go +++ /dev/null @@ -1,68 +0,0 @@ -package grpc - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "regexp" - "strings" - - "google.golang.org/grpc" - "google.golang.org/grpc/backoff" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" -) - -var regex = regexp.MustCompile(`http[s]?://`) - -type ClientConnInterface interface { - Invoke(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error - GetState() connectivity.State - NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) - Close() error -} - -// TODO: Fix Host <-> Url -func GetConnection(host string, safe bool, options ...grpc.DialOption) (ClientConnInterface, error) { - if strings.TrimSpace(host) == "" { - fmt.Println("Host for gRPC not set, using dummy connection") - - return &DummyConn{}, nil - } - - options = append(options, grpc.WithConnectParams(grpc.ConnectParams{Backoff: backoff.DefaultConfig})) - - host = regex.ReplaceAllString(host, "") - if strings.HasPrefix(host, "localhost") || !safe { - options = append(options, grpc.WithTransportCredentials(insecure.NewCredentials())) - conn, err := grpc.Dial(host, options...) 
- if err != nil { - return nil, fmt.Errorf("failed to dial: %w", err) - } - - return conn, nil - } - - systemRoots, err := x509.SystemCertPool() - if err != nil { - errMsg := fmt.Errorf("failed to read system root certificate pool: %w", err) - - return nil, errMsg - } - - cred := credentials.NewTLS(&tls.Config{ - RootCAs: systemRoots, - MinVersion: tls.VersionTLS13, - }) - - options = append(options, grpc.WithAuthority(host), grpc.WithTransportCredentials(cred)) - conn, err := grpc.Dial(host+":443", options...) - - if err != nil { - return nil, fmt.Errorf("failed to dial: %w", err) - } - - return conn, nil -} diff --git a/packages/shared/pkg/grpc/dummy_client.go b/packages/shared/pkg/grpc/dummy_client.go deleted file mode 100644 index 230c999..0000000 --- a/packages/shared/pkg/grpc/dummy_client.go +++ /dev/null @@ -1,26 +0,0 @@ -package grpc - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" -) - -type DummyConn struct{} - -func (dc *DummyConn) Invoke(_ context.Context, _ string, _ any, _ any, _ ...grpc.CallOption) error { - return nil -} - -func (dc *DummyConn) GetState() connectivity.State { - return connectivity.Ready -} - -func (dc *DummyConn) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { - return nil, nil -} - -func (dc *DummyConn) Close() error { - return nil -} diff --git a/packages/shared/pkg/grpc/envd/filesystem/filesystem.pb.go b/packages/shared/pkg/grpc/envd/filesystem/filesystem.pb.go new file mode 100644 index 0000000..f9a19f5 --- /dev/null +++ b/packages/shared/pkg/grpc/envd/filesystem/filesystem.pb.go @@ -0,0 +1,1765 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: filesystem/filesystem.proto + +package filesystem + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FileType int32 + +const ( + FileType_FILE_TYPE_UNSPECIFIED FileType = 0 + FileType_FILE_TYPE_FILE FileType = 1 + FileType_FILE_TYPE_DIRECTORY FileType = 2 +) + +// Enum value maps for FileType. +var ( + FileType_name = map[int32]string{ + 0: "FILE_TYPE_UNSPECIFIED", + 1: "FILE_TYPE_FILE", + 2: "FILE_TYPE_DIRECTORY", + } + FileType_value = map[string]int32{ + "FILE_TYPE_UNSPECIFIED": 0, + "FILE_TYPE_FILE": 1, + "FILE_TYPE_DIRECTORY": 2, + } +) + +func (x FileType) Enum() *FileType { + p := new(FileType) + *p = x + return p +} + +func (x FileType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileType) Descriptor() protoreflect.EnumDescriptor { + return file_filesystem_filesystem_proto_enumTypes[0].Descriptor() +} + +func (FileType) Type() protoreflect.EnumType { + return &file_filesystem_filesystem_proto_enumTypes[0] +} + +func (x FileType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FileType.Descriptor instead. 
+func (FileType) EnumDescriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{0} +} + +type EventType int32 + +const ( + EventType_EVENT_TYPE_UNSPECIFIED EventType = 0 + EventType_EVENT_TYPE_CREATE EventType = 1 + EventType_EVENT_TYPE_WRITE EventType = 2 + EventType_EVENT_TYPE_REMOVE EventType = 3 + EventType_EVENT_TYPE_RENAME EventType = 4 + EventType_EVENT_TYPE_CHMOD EventType = 5 +) + +// Enum value maps for EventType. +var ( + EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNSPECIFIED", + 1: "EVENT_TYPE_CREATE", + 2: "EVENT_TYPE_WRITE", + 3: "EVENT_TYPE_REMOVE", + 4: "EVENT_TYPE_RENAME", + 5: "EVENT_TYPE_CHMOD", + } + EventType_value = map[string]int32{ + "EVENT_TYPE_UNSPECIFIED": 0, + "EVENT_TYPE_CREATE": 1, + "EVENT_TYPE_WRITE": 2, + "EVENT_TYPE_REMOVE": 3, + "EVENT_TYPE_RENAME": 4, + "EVENT_TYPE_CHMOD": 5, + } +) + +func (x EventType) Enum() *EventType { + p := new(EventType) + *p = x + return p +} + +func (x EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EventType) Descriptor() protoreflect.EnumDescriptor { + return file_filesystem_filesystem_proto_enumTypes[1].Descriptor() +} + +func (EventType) Type() protoreflect.EnumType { + return &file_filesystem_filesystem_proto_enumTypes[1] +} + +func (x EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EventType.Descriptor instead. 
+func (EventType) EnumDescriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{1} +} + +type MoveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` +} + +func (x *MoveRequest) Reset() { + *x = MoveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveRequest) ProtoMessage() {} + +func (x *MoveRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveRequest.ProtoReflect.Descriptor instead. 
+func (*MoveRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{0} +} + +func (x *MoveRequest) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +func (x *MoveRequest) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +type MoveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *MoveResponse) Reset() { + *x = MoveResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveResponse) ProtoMessage() {} + +func (x *MoveResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveResponse.ProtoReflect.Descriptor instead. 
+func (*MoveResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{1} +} + +func (x *MoveResponse) GetEntry() *EntryInfo { + if x != nil { + return x.Entry + } + return nil +} + +type MakeDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *MakeDirRequest) Reset() { + *x = MakeDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MakeDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MakeDirRequest) ProtoMessage() {} + +func (x *MakeDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MakeDirRequest.ProtoReflect.Descriptor instead. 
+func (*MakeDirRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{2} +} + +func (x *MakeDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type MakeDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *MakeDirResponse) Reset() { + *x = MakeDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MakeDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MakeDirResponse) ProtoMessage() {} + +func (x *MakeDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MakeDirResponse.ProtoReflect.Descriptor instead. 
+func (*MakeDirResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{3} +} + +func (x *MakeDirResponse) GetEntry() *EntryInfo { + if x != nil { + return x.Entry + } + return nil +} + +type RemoveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *RemoveRequest) Reset() { + *x = RemoveRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoveRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveRequest) ProtoMessage() {} + +func (x *RemoveRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{4} +} + +func (x *RemoveRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type RemoveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RemoveResponse) Reset() { + *x = RemoveResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoveResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveResponse) ProtoMessage() {} + +func (x *RemoveResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveResponse.ProtoReflect.Descriptor instead. 
+func (*RemoveResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{5} +} + +type StatRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *StatRequest) Reset() { + *x = StatRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatRequest) ProtoMessage() {} + +func (x *StatRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatRequest.ProtoReflect.Descriptor instead. 
+func (*StatRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{6} +} + +func (x *StatRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type StatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *EntryInfo `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *StatResponse) Reset() { + *x = StatResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatResponse) ProtoMessage() {} + +func (x *StatResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatResponse.ProtoReflect.Descriptor instead. 
+func (*StatResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{7} +} + +func (x *StatResponse) GetEntry() *EntryInfo { + if x != nil { + return x.Entry + } + return nil +} + +type EntryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type FileType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.FileType" json:"type,omitempty"` + Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *EntryInfo) Reset() { + *x = EntryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryInfo) ProtoMessage() {} + +func (x *EntryInfo) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryInfo.ProtoReflect.Descriptor instead. 
+func (*EntryInfo) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{8} +} + +func (x *EntryInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EntryInfo) GetType() FileType { + if x != nil { + return x.Type + } + return FileType_FILE_TYPE_UNSPECIFIED +} + +func (x *EntryInfo) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type ListDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Depth uint32 `protobuf:"varint,2,opt,name=depth,proto3" json:"depth,omitempty"` +} + +func (x *ListDirRequest) Reset() { + *x = ListDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDirRequest) ProtoMessage() {} + +func (x *ListDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDirRequest.ProtoReflect.Descriptor instead. 
+func (*ListDirRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{9} +} + +func (x *ListDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *ListDirRequest) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} + +type ListDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*EntryInfo `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *ListDirResponse) Reset() { + *x = ListDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDirResponse) ProtoMessage() {} + +func (x *ListDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDirResponse.ProtoReflect.Descriptor instead. 
+func (*ListDirResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{10} +} + +func (x *ListDirResponse) GetEntries() []*EntryInfo { + if x != nil { + return x.Entries + } + return nil +} + +type WatchDirRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *WatchDirRequest) Reset() { + *x = WatchDirRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchDirRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchDirRequest) ProtoMessage() {} + +func (x *WatchDirRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchDirRequest.ProtoReflect.Descriptor instead. 
+func (*WatchDirRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{11} +} + +func (x *WatchDirRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *WatchDirRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type FilesystemEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type EventType `protobuf:"varint,2,opt,name=type,proto3,enum=filesystem.EventType" json:"type,omitempty"` +} + +func (x *FilesystemEvent) Reset() { + *x = FilesystemEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilesystemEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilesystemEvent) ProtoMessage() {} + +func (x *FilesystemEvent) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilesystemEvent.ProtoReflect.Descriptor instead. 
+func (*FilesystemEvent) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{12} +} + +func (x *FilesystemEvent) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FilesystemEvent) GetType() EventType { + if x != nil { + return x.Type + } + return EventType_EVENT_TYPE_UNSPECIFIED +} + +type WatchDirResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Event: + // + // *WatchDirResponse_Start + // *WatchDirResponse_Filesystem + // *WatchDirResponse_Keepalive + Event isWatchDirResponse_Event `protobuf_oneof:"event"` +} + +func (x *WatchDirResponse) Reset() { + *x = WatchDirResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchDirResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchDirResponse) ProtoMessage() {} + +func (x *WatchDirResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchDirResponse.ProtoReflect.Descriptor instead. 
+func (*WatchDirResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13} +} + +func (m *WatchDirResponse) GetEvent() isWatchDirResponse_Event { + if m != nil { + return m.Event + } + return nil +} + +func (x *WatchDirResponse) GetStart() *WatchDirResponse_StartEvent { + if x, ok := x.GetEvent().(*WatchDirResponse_Start); ok { + return x.Start + } + return nil +} + +func (x *WatchDirResponse) GetFilesystem() *FilesystemEvent { + if x, ok := x.GetEvent().(*WatchDirResponse_Filesystem); ok { + return x.Filesystem + } + return nil +} + +func (x *WatchDirResponse) GetKeepalive() *WatchDirResponse_KeepAlive { + if x, ok := x.GetEvent().(*WatchDirResponse_Keepalive); ok { + return x.Keepalive + } + return nil +} + +type isWatchDirResponse_Event interface { + isWatchDirResponse_Event() +} + +type WatchDirResponse_Start struct { + Start *WatchDirResponse_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` +} + +type WatchDirResponse_Filesystem struct { + Filesystem *FilesystemEvent `protobuf:"bytes,2,opt,name=filesystem,proto3,oneof"` +} + +type WatchDirResponse_Keepalive struct { + Keepalive *WatchDirResponse_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` +} + +func (*WatchDirResponse_Start) isWatchDirResponse_Event() {} + +func (*WatchDirResponse_Filesystem) isWatchDirResponse_Event() {} + +func (*WatchDirResponse_Keepalive) isWatchDirResponse_Event() {} + +type CreateWatcherRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` +} + +func (x *CreateWatcherRequest) Reset() { + *x = CreateWatcherRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) + } +} + +func (x *CreateWatcherRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWatcherRequest) ProtoMessage() {} + +func (x *CreateWatcherRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWatcherRequest.ProtoReflect.Descriptor instead. +func (*CreateWatcherRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateWatcherRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CreateWatcherRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type CreateWatcherResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` +} + +func (x *CreateWatcherResponse) Reset() { + *x = CreateWatcherResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateWatcherResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWatcherResponse) ProtoMessage() {} + +func (x *CreateWatcherResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWatcherResponse.ProtoReflect.Descriptor 
instead. +func (*CreateWatcherResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateWatcherResponse) GetWatcherId() string { + if x != nil { + return x.WatcherId + } + return "" +} + +type GetWatcherEventsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` +} + +func (x *GetWatcherEventsRequest) Reset() { + *x = GetWatcherEventsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetWatcherEventsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWatcherEventsRequest) ProtoMessage() {} + +func (x *GetWatcherEventsRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWatcherEventsRequest.ProtoReflect.Descriptor instead. 
+func (*GetWatcherEventsRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{16} +} + +func (x *GetWatcherEventsRequest) GetWatcherId() string { + if x != nil { + return x.WatcherId + } + return "" +} + +type GetWatcherEventsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Events []*FilesystemEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (x *GetWatcherEventsResponse) Reset() { + *x = GetWatcherEventsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetWatcherEventsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWatcherEventsResponse) ProtoMessage() {} + +func (x *GetWatcherEventsResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWatcherEventsResponse.ProtoReflect.Descriptor instead. 
+func (*GetWatcherEventsResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{17} +} + +func (x *GetWatcherEventsResponse) GetEvents() []*FilesystemEvent { + if x != nil { + return x.Events + } + return nil +} + +type RemoveWatcherRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WatcherId string `protobuf:"bytes,1,opt,name=watcher_id,json=watcherId,proto3" json:"watcher_id,omitempty"` +} + +func (x *RemoveWatcherRequest) Reset() { + *x = RemoveWatcherRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoveWatcherRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveWatcherRequest) ProtoMessage() {} + +func (x *RemoveWatcherRequest) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveWatcherRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveWatcherRequest) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{18} +} + +func (x *RemoveWatcherRequest) GetWatcherId() string { + if x != nil { + return x.WatcherId + } + return "" +} + +type RemoveWatcherResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *RemoveWatcherResponse) Reset() { + *x = RemoveWatcherResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemoveWatcherResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveWatcherResponse) ProtoMessage() {} + +func (x *RemoveWatcherResponse) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveWatcherResponse.ProtoReflect.Descriptor instead. 
+func (*RemoveWatcherResponse) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{19} +} + +type WatchDirResponse_StartEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *WatchDirResponse_StartEvent) Reset() { + *x = WatchDirResponse_StartEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchDirResponse_StartEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchDirResponse_StartEvent) ProtoMessage() {} + +func (x *WatchDirResponse_StartEvent) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchDirResponse_StartEvent.ProtoReflect.Descriptor instead. 
+func (*WatchDirResponse_StartEvent) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13, 0} +} + +type WatchDirResponse_KeepAlive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *WatchDirResponse_KeepAlive) Reset() { + *x = WatchDirResponse_KeepAlive{} + if protoimpl.UnsafeEnabled { + mi := &file_filesystem_filesystem_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchDirResponse_KeepAlive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchDirResponse_KeepAlive) ProtoMessage() {} + +func (x *WatchDirResponse_KeepAlive) ProtoReflect() protoreflect.Message { + mi := &file_filesystem_filesystem_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchDirResponse_KeepAlive.ProtoReflect.Descriptor instead. 
+func (*WatchDirResponse_KeepAlive) Descriptor() ([]byte, []int) { + return file_filesystem_filesystem_proto_rawDescGZIP(), []int{13, 1} +} + +var File_filesystem_filesystem_proto protoreflect.FileDescriptor + +var file_filesystem_filesystem_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x47, 0x0a, 0x0b, 0x4d, 0x6f, 0x76, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x3b, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, + 0x24, 0x0a, 0x0e, 0x4d, 0x61, 0x6b, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x3e, 0x0a, 0x0f, 0x4d, 0x61, 0x6b, 0x65, 0x44, 0x69, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x23, 0x0a, 0x0d, 0x52, 0x65, 
0x6d, 0x6f, 0x76, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x10, 0x0a, 0x0e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x0a, 0x0b, + 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, + 0x3b, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2b, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x5d, 0x0a, 0x09, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x3a, 0x0a, 0x0e, 0x4c, + 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x05, 0x64, 0x65, 0x70, 0x74, 0x68, 0x22, 0x42, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x44, + 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x65, 0x6e, + 0x74, 
0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0f, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, + 0x22, 0x50, 0x0a, 0x0f, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x3d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x45, 0x76, 0x65, 
0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x46, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, + 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, + 0x76, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x1a, + 0x0c, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x0b, 0x0a, + 0x09, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0x48, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x36, 0x0a, + 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x49, 0x64, 0x22, 0x38, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x49, 0x64, 0x22, + 0x4f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 
0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x35, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x77, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x77, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x49, 0x64, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2a, 0x52, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, + 0x46, 0x49, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x46, 0x49, 0x4c, 0x45, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x46, + 0x49, 0x4c, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x4f, + 0x52, 0x59, 0x10, 0x02, 0x2a, 0x98, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x15, + 0x0a, 0x11, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, + 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x45, + 0x56, 0x45, 0x4e, 
0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x52, 0x45, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x4d, 0x4f, 0x44, 0x10, 0x05, 0x32, + 0x9f, 0x05, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x39, + 0x0a, 0x04, 0x53, 0x74, 0x61, 0x74, 0x12, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x18, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x4d, 0x61, 0x6b, + 0x65, 0x44, 0x69, 0x72, 0x12, 0x1a, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x2e, 0x4d, 0x61, 0x6b, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x4d, 0x61, + 0x6b, 0x65, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, + 0x04, 0x4d, 0x6f, 0x76, 0x65, 0x12, 0x17, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x4d, 0x6f, 0x76, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x4c, 0x69, 0x73, 0x74, + 0x44, 0x69, 0x72, 0x12, 0x1a, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x19, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1a, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, + 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x12, 0x1b, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, + 0x74, 0x65, 0x6d, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x20, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x23, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x47, 0x65, + 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x0d, 
0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x20, 0x2e, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0xac, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x42, 0x0f, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x32, 0x62, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, 0x72, + 0x61, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, + 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x6e, 0x76, 0x64, 0x2f, + 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0xa2, 0x02, 0x03, 0x46, 0x58, 0x58, + 0xaa, 0x02, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0xca, 0x02, 0x0a, + 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0xe2, 0x02, 0x16, 0x46, 0x69, 0x6c, + 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_filesystem_filesystem_proto_rawDescOnce sync.Once + file_filesystem_filesystem_proto_rawDescData = file_filesystem_filesystem_proto_rawDesc +) + +func file_filesystem_filesystem_proto_rawDescGZIP() []byte { + file_filesystem_filesystem_proto_rawDescOnce.Do(func() { + file_filesystem_filesystem_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_filesystem_filesystem_proto_rawDescData) + }) + return file_filesystem_filesystem_proto_rawDescData +} + +var file_filesystem_filesystem_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_filesystem_filesystem_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_filesystem_filesystem_proto_goTypes = []interface{}{ + (FileType)(0), // 0: filesystem.FileType + (EventType)(0), // 1: filesystem.EventType + (*MoveRequest)(nil), // 2: filesystem.MoveRequest + (*MoveResponse)(nil), // 3: filesystem.MoveResponse + (*MakeDirRequest)(nil), // 4: filesystem.MakeDirRequest + (*MakeDirResponse)(nil), // 5: filesystem.MakeDirResponse + (*RemoveRequest)(nil), // 6: filesystem.RemoveRequest + (*RemoveResponse)(nil), // 7: filesystem.RemoveResponse + (*StatRequest)(nil), // 8: filesystem.StatRequest + (*StatResponse)(nil), // 9: filesystem.StatResponse + (*EntryInfo)(nil), // 10: filesystem.EntryInfo + (*ListDirRequest)(nil), // 11: filesystem.ListDirRequest + (*ListDirResponse)(nil), // 12: filesystem.ListDirResponse + (*WatchDirRequest)(nil), // 13: filesystem.WatchDirRequest + (*FilesystemEvent)(nil), // 14: filesystem.FilesystemEvent + (*WatchDirResponse)(nil), // 15: filesystem.WatchDirResponse + (*CreateWatcherRequest)(nil), // 16: filesystem.CreateWatcherRequest + (*CreateWatcherResponse)(nil), // 17: filesystem.CreateWatcherResponse + (*GetWatcherEventsRequest)(nil), // 18: filesystem.GetWatcherEventsRequest + (*GetWatcherEventsResponse)(nil), // 19: filesystem.GetWatcherEventsResponse + (*RemoveWatcherRequest)(nil), // 20: filesystem.RemoveWatcherRequest + (*RemoveWatcherResponse)(nil), // 21: filesystem.RemoveWatcherResponse + (*WatchDirResponse_StartEvent)(nil), // 22: filesystem.WatchDirResponse.StartEvent + (*WatchDirResponse_KeepAlive)(nil), // 23: filesystem.WatchDirResponse.KeepAlive +} +var file_filesystem_filesystem_proto_depIdxs = []int32{ + 10, // 0: filesystem.MoveResponse.entry:type_name -> filesystem.EntryInfo + 
10, // 1: filesystem.MakeDirResponse.entry:type_name -> filesystem.EntryInfo + 10, // 2: filesystem.StatResponse.entry:type_name -> filesystem.EntryInfo + 0, // 3: filesystem.EntryInfo.type:type_name -> filesystem.FileType + 10, // 4: filesystem.ListDirResponse.entries:type_name -> filesystem.EntryInfo + 1, // 5: filesystem.FilesystemEvent.type:type_name -> filesystem.EventType + 22, // 6: filesystem.WatchDirResponse.start:type_name -> filesystem.WatchDirResponse.StartEvent + 14, // 7: filesystem.WatchDirResponse.filesystem:type_name -> filesystem.FilesystemEvent + 23, // 8: filesystem.WatchDirResponse.keepalive:type_name -> filesystem.WatchDirResponse.KeepAlive + 14, // 9: filesystem.GetWatcherEventsResponse.events:type_name -> filesystem.FilesystemEvent + 8, // 10: filesystem.Filesystem.Stat:input_type -> filesystem.StatRequest + 4, // 11: filesystem.Filesystem.MakeDir:input_type -> filesystem.MakeDirRequest + 2, // 12: filesystem.Filesystem.Move:input_type -> filesystem.MoveRequest + 11, // 13: filesystem.Filesystem.ListDir:input_type -> filesystem.ListDirRequest + 6, // 14: filesystem.Filesystem.Remove:input_type -> filesystem.RemoveRequest + 13, // 15: filesystem.Filesystem.WatchDir:input_type -> filesystem.WatchDirRequest + 16, // 16: filesystem.Filesystem.CreateWatcher:input_type -> filesystem.CreateWatcherRequest + 18, // 17: filesystem.Filesystem.GetWatcherEvents:input_type -> filesystem.GetWatcherEventsRequest + 20, // 18: filesystem.Filesystem.RemoveWatcher:input_type -> filesystem.RemoveWatcherRequest + 9, // 19: filesystem.Filesystem.Stat:output_type -> filesystem.StatResponse + 5, // 20: filesystem.Filesystem.MakeDir:output_type -> filesystem.MakeDirResponse + 3, // 21: filesystem.Filesystem.Move:output_type -> filesystem.MoveResponse + 12, // 22: filesystem.Filesystem.ListDir:output_type -> filesystem.ListDirResponse + 7, // 23: filesystem.Filesystem.Remove:output_type -> filesystem.RemoveResponse + 15, // 24: 
filesystem.Filesystem.WatchDir:output_type -> filesystem.WatchDirResponse + 17, // 25: filesystem.Filesystem.CreateWatcher:output_type -> filesystem.CreateWatcherResponse + 19, // 26: filesystem.Filesystem.GetWatcherEvents:output_type -> filesystem.GetWatcherEventsResponse + 21, // 27: filesystem.Filesystem.RemoveWatcher:output_type -> filesystem.RemoveWatcherResponse + 19, // [19:28] is the sub-list for method output_type + 10, // [10:19] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_filesystem_filesystem_proto_init() } +func file_filesystem_filesystem_proto_init() { + if File_filesystem_filesystem_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_filesystem_filesystem_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MakeDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MakeDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[4].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*RemoveRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchDirRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilesystemEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchDirResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateWatcherRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateWatcherResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWatcherEventsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWatcherEventsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveWatcherRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_filesystem_filesystem_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveWatcherResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchDirResponse_StartEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_filesystem_filesystem_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchDirResponse_KeepAlive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_filesystem_filesystem_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*WatchDirResponse_Start)(nil), + (*WatchDirResponse_Filesystem)(nil), + (*WatchDirResponse_Keepalive)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_filesystem_filesystem_proto_rawDesc, + NumEnums: 2, + NumMessages: 22, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_filesystem_filesystem_proto_goTypes, + DependencyIndexes: file_filesystem_filesystem_proto_depIdxs, + EnumInfos: file_filesystem_filesystem_proto_enumTypes, + MessageInfos: file_filesystem_filesystem_proto_msgTypes, + }.Build() + File_filesystem_filesystem_proto = out.File + file_filesystem_filesystem_proto_rawDesc = nil + file_filesystem_filesystem_proto_goTypes = nil + file_filesystem_filesystem_proto_depIdxs = nil +} diff --git a/packages/shared/pkg/grpc/envd/filesystem/filesystemconnect/filesystem.connect.go b/packages/shared/pkg/grpc/envd/filesystem/filesystemconnect/filesystem.connect.go new file mode 100644 index 0000000..715359d --- /dev/null +++ 
b/packages/shared/pkg/grpc/envd/filesystem/filesystemconnect/filesystem.connect.go @@ -0,0 +1,337 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: filesystem/filesystem.proto + +package filesystemconnect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + filesystem "github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/filesystem" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // FilesystemName is the fully-qualified name of the Filesystem service. + FilesystemName = "filesystem.Filesystem" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // FilesystemStatProcedure is the fully-qualified name of the Filesystem's Stat RPC. + FilesystemStatProcedure = "/filesystem.Filesystem/Stat" + // FilesystemMakeDirProcedure is the fully-qualified name of the Filesystem's MakeDir RPC. + FilesystemMakeDirProcedure = "/filesystem.Filesystem/MakeDir" + // FilesystemMoveProcedure is the fully-qualified name of the Filesystem's Move RPC. 
+ FilesystemMoveProcedure = "/filesystem.Filesystem/Move" + // FilesystemListDirProcedure is the fully-qualified name of the Filesystem's ListDir RPC. + FilesystemListDirProcedure = "/filesystem.Filesystem/ListDir" + // FilesystemRemoveProcedure is the fully-qualified name of the Filesystem's Remove RPC. + FilesystemRemoveProcedure = "/filesystem.Filesystem/Remove" + // FilesystemWatchDirProcedure is the fully-qualified name of the Filesystem's WatchDir RPC. + FilesystemWatchDirProcedure = "/filesystem.Filesystem/WatchDir" + // FilesystemCreateWatcherProcedure is the fully-qualified name of the Filesystem's CreateWatcher + // RPC. + FilesystemCreateWatcherProcedure = "/filesystem.Filesystem/CreateWatcher" + // FilesystemGetWatcherEventsProcedure is the fully-qualified name of the Filesystem's + // GetWatcherEvents RPC. + FilesystemGetWatcherEventsProcedure = "/filesystem.Filesystem/GetWatcherEvents" + // FilesystemRemoveWatcherProcedure is the fully-qualified name of the Filesystem's RemoveWatcher + // RPC. + FilesystemRemoveWatcherProcedure = "/filesystem.Filesystem/RemoveWatcher" +) + +// FilesystemClient is a client for the filesystem.Filesystem service. 
+type FilesystemClient interface { + Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) + MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) + Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) + ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) + Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) + WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest]) (*connect.ServerStreamForClient[filesystem.WatchDirResponse], error) + // Non-streaming versions of WatchDir + CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) + GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) + RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) +} + +// NewFilesystemClient constructs a client for the filesystem.Filesystem service. By default, it +// uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends +// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). 
+func NewFilesystemClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) FilesystemClient { + baseURL = strings.TrimRight(baseURL, "/") + filesystemMethods := filesystem.File_filesystem_filesystem_proto.Services().ByName("Filesystem").Methods() + return &filesystemClient{ + stat: connect.NewClient[filesystem.StatRequest, filesystem.StatResponse]( + httpClient, + baseURL+FilesystemStatProcedure, + connect.WithSchema(filesystemMethods.ByName("Stat")), + connect.WithClientOptions(opts...), + ), + makeDir: connect.NewClient[filesystem.MakeDirRequest, filesystem.MakeDirResponse]( + httpClient, + baseURL+FilesystemMakeDirProcedure, + connect.WithSchema(filesystemMethods.ByName("MakeDir")), + connect.WithClientOptions(opts...), + ), + move: connect.NewClient[filesystem.MoveRequest, filesystem.MoveResponse]( + httpClient, + baseURL+FilesystemMoveProcedure, + connect.WithSchema(filesystemMethods.ByName("Move")), + connect.WithClientOptions(opts...), + ), + listDir: connect.NewClient[filesystem.ListDirRequest, filesystem.ListDirResponse]( + httpClient, + baseURL+FilesystemListDirProcedure, + connect.WithSchema(filesystemMethods.ByName("ListDir")), + connect.WithClientOptions(opts...), + ), + remove: connect.NewClient[filesystem.RemoveRequest, filesystem.RemoveResponse]( + httpClient, + baseURL+FilesystemRemoveProcedure, + connect.WithSchema(filesystemMethods.ByName("Remove")), + connect.WithClientOptions(opts...), + ), + watchDir: connect.NewClient[filesystem.WatchDirRequest, filesystem.WatchDirResponse]( + httpClient, + baseURL+FilesystemWatchDirProcedure, + connect.WithSchema(filesystemMethods.ByName("WatchDir")), + connect.WithClientOptions(opts...), + ), + createWatcher: connect.NewClient[filesystem.CreateWatcherRequest, filesystem.CreateWatcherResponse]( + httpClient, + baseURL+FilesystemCreateWatcherProcedure, + connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), + connect.WithClientOptions(opts...), + ), + getWatcherEvents: 
connect.NewClient[filesystem.GetWatcherEventsRequest, filesystem.GetWatcherEventsResponse]( + httpClient, + baseURL+FilesystemGetWatcherEventsProcedure, + connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), + connect.WithClientOptions(opts...), + ), + removeWatcher: connect.NewClient[filesystem.RemoveWatcherRequest, filesystem.RemoveWatcherResponse]( + httpClient, + baseURL+FilesystemRemoveWatcherProcedure, + connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), + connect.WithClientOptions(opts...), + ), + } +} + +// filesystemClient implements FilesystemClient. +type filesystemClient struct { + stat *connect.Client[filesystem.StatRequest, filesystem.StatResponse] + makeDir *connect.Client[filesystem.MakeDirRequest, filesystem.MakeDirResponse] + move *connect.Client[filesystem.MoveRequest, filesystem.MoveResponse] + listDir *connect.Client[filesystem.ListDirRequest, filesystem.ListDirResponse] + remove *connect.Client[filesystem.RemoveRequest, filesystem.RemoveResponse] + watchDir *connect.Client[filesystem.WatchDirRequest, filesystem.WatchDirResponse] + createWatcher *connect.Client[filesystem.CreateWatcherRequest, filesystem.CreateWatcherResponse] + getWatcherEvents *connect.Client[filesystem.GetWatcherEventsRequest, filesystem.GetWatcherEventsResponse] + removeWatcher *connect.Client[filesystem.RemoveWatcherRequest, filesystem.RemoveWatcherResponse] +} + +// Stat calls filesystem.Filesystem.Stat. +func (c *filesystemClient) Stat(ctx context.Context, req *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) { + return c.stat.CallUnary(ctx, req) +} + +// MakeDir calls filesystem.Filesystem.MakeDir. +func (c *filesystemClient) MakeDir(ctx context.Context, req *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) { + return c.makeDir.CallUnary(ctx, req) +} + +// Move calls filesystem.Filesystem.Move. 
+func (c *filesystemClient) Move(ctx context.Context, req *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) { + return c.move.CallUnary(ctx, req) +} + +// ListDir calls filesystem.Filesystem.ListDir. +func (c *filesystemClient) ListDir(ctx context.Context, req *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) { + return c.listDir.CallUnary(ctx, req) +} + +// Remove calls filesystem.Filesystem.Remove. +func (c *filesystemClient) Remove(ctx context.Context, req *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) { + return c.remove.CallUnary(ctx, req) +} + +// WatchDir calls filesystem.Filesystem.WatchDir. +func (c *filesystemClient) WatchDir(ctx context.Context, req *connect.Request[filesystem.WatchDirRequest]) (*connect.ServerStreamForClient[filesystem.WatchDirResponse], error) { + return c.watchDir.CallServerStream(ctx, req) +} + +// CreateWatcher calls filesystem.Filesystem.CreateWatcher. +func (c *filesystemClient) CreateWatcher(ctx context.Context, req *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) { + return c.createWatcher.CallUnary(ctx, req) +} + +// GetWatcherEvents calls filesystem.Filesystem.GetWatcherEvents. +func (c *filesystemClient) GetWatcherEvents(ctx context.Context, req *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) { + return c.getWatcherEvents.CallUnary(ctx, req) +} + +// RemoveWatcher calls filesystem.Filesystem.RemoveWatcher. +func (c *filesystemClient) RemoveWatcher(ctx context.Context, req *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) { + return c.removeWatcher.CallUnary(ctx, req) +} + +// FilesystemHandler is an implementation of the filesystem.Filesystem service. 
+type FilesystemHandler interface { + Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) + MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) + Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) + ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) + Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) + WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest], *connect.ServerStream[filesystem.WatchDirResponse]) error + // Non-streaming versions of WatchDir + CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) + GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) + RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) +} + +// NewFilesystemHandler builds an HTTP handler from the service implementation. It returns the path +// on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewFilesystemHandler(svc FilesystemHandler, opts ...connect.HandlerOption) (string, http.Handler) { + filesystemMethods := filesystem.File_filesystem_filesystem_proto.Services().ByName("Filesystem").Methods() + filesystemStatHandler := connect.NewUnaryHandler( + FilesystemStatProcedure, + svc.Stat, + connect.WithSchema(filesystemMethods.ByName("Stat")), + connect.WithHandlerOptions(opts...), + ) + filesystemMakeDirHandler := connect.NewUnaryHandler( + FilesystemMakeDirProcedure, + svc.MakeDir, + connect.WithSchema(filesystemMethods.ByName("MakeDir")), + connect.WithHandlerOptions(opts...), + ) + filesystemMoveHandler := connect.NewUnaryHandler( + FilesystemMoveProcedure, + svc.Move, + connect.WithSchema(filesystemMethods.ByName("Move")), + connect.WithHandlerOptions(opts...), + ) + filesystemListDirHandler := connect.NewUnaryHandler( + FilesystemListDirProcedure, + svc.ListDir, + connect.WithSchema(filesystemMethods.ByName("ListDir")), + connect.WithHandlerOptions(opts...), + ) + filesystemRemoveHandler := connect.NewUnaryHandler( + FilesystemRemoveProcedure, + svc.Remove, + connect.WithSchema(filesystemMethods.ByName("Remove")), + connect.WithHandlerOptions(opts...), + ) + filesystemWatchDirHandler := connect.NewServerStreamHandler( + FilesystemWatchDirProcedure, + svc.WatchDir, + connect.WithSchema(filesystemMethods.ByName("WatchDir")), + connect.WithHandlerOptions(opts...), + ) + filesystemCreateWatcherHandler := connect.NewUnaryHandler( + FilesystemCreateWatcherProcedure, + svc.CreateWatcher, + connect.WithSchema(filesystemMethods.ByName("CreateWatcher")), + connect.WithHandlerOptions(opts...), + ) + filesystemGetWatcherEventsHandler := connect.NewUnaryHandler( + FilesystemGetWatcherEventsProcedure, + svc.GetWatcherEvents, + connect.WithSchema(filesystemMethods.ByName("GetWatcherEvents")), + connect.WithHandlerOptions(opts...), + ) + filesystemRemoveWatcherHandler := connect.NewUnaryHandler( + FilesystemRemoveWatcherProcedure, + svc.RemoveWatcher, + 
connect.WithSchema(filesystemMethods.ByName("RemoveWatcher")), + connect.WithHandlerOptions(opts...), + ) + return "/filesystem.Filesystem/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case FilesystemStatProcedure: + filesystemStatHandler.ServeHTTP(w, r) + case FilesystemMakeDirProcedure: + filesystemMakeDirHandler.ServeHTTP(w, r) + case FilesystemMoveProcedure: + filesystemMoveHandler.ServeHTTP(w, r) + case FilesystemListDirProcedure: + filesystemListDirHandler.ServeHTTP(w, r) + case FilesystemRemoveProcedure: + filesystemRemoveHandler.ServeHTTP(w, r) + case FilesystemWatchDirProcedure: + filesystemWatchDirHandler.ServeHTTP(w, r) + case FilesystemCreateWatcherProcedure: + filesystemCreateWatcherHandler.ServeHTTP(w, r) + case FilesystemGetWatcherEventsProcedure: + filesystemGetWatcherEventsHandler.ServeHTTP(w, r) + case FilesystemRemoveWatcherProcedure: + filesystemRemoveWatcherHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedFilesystemHandler returns CodeUnimplemented from all methods. 
+type UnimplementedFilesystemHandler struct{} + +func (UnimplementedFilesystemHandler) Stat(context.Context, *connect.Request[filesystem.StatRequest]) (*connect.Response[filesystem.StatResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Stat is not implemented")) +} + +func (UnimplementedFilesystemHandler) MakeDir(context.Context, *connect.Request[filesystem.MakeDirRequest]) (*connect.Response[filesystem.MakeDirResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.MakeDir is not implemented")) +} + +func (UnimplementedFilesystemHandler) Move(context.Context, *connect.Request[filesystem.MoveRequest]) (*connect.Response[filesystem.MoveResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Move is not implemented")) +} + +func (UnimplementedFilesystemHandler) ListDir(context.Context, *connect.Request[filesystem.ListDirRequest]) (*connect.Response[filesystem.ListDirResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.ListDir is not implemented")) +} + +func (UnimplementedFilesystemHandler) Remove(context.Context, *connect.Request[filesystem.RemoveRequest]) (*connect.Response[filesystem.RemoveResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.Remove is not implemented")) +} + +func (UnimplementedFilesystemHandler) WatchDir(context.Context, *connect.Request[filesystem.WatchDirRequest], *connect.ServerStream[filesystem.WatchDirResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.WatchDir is not implemented")) +} + +func (UnimplementedFilesystemHandler) CreateWatcher(context.Context, *connect.Request[filesystem.CreateWatcherRequest]) (*connect.Response[filesystem.CreateWatcherResponse], error) { + return nil, 
connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.CreateWatcher is not implemented")) +} + +func (UnimplementedFilesystemHandler) GetWatcherEvents(context.Context, *connect.Request[filesystem.GetWatcherEventsRequest]) (*connect.Response[filesystem.GetWatcherEventsResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.GetWatcherEvents is not implemented")) +} + +func (UnimplementedFilesystemHandler) RemoveWatcher(context.Context, *connect.Request[filesystem.RemoveWatcherRequest]) (*connect.Response[filesystem.RemoveWatcherResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("filesystem.Filesystem.RemoveWatcher is not implemented")) +} diff --git a/packages/shared/pkg/grpc/envd/process/process.pb.go b/packages/shared/pkg/grpc/envd/process/process.pb.go new file mode 100644 index 0000000..7228aa8 --- /dev/null +++ b/packages/shared/pkg/grpc/envd/process/process.pb.go @@ -0,0 +1,2340 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: process/process.proto + +package process + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Signal int32 + +const ( + Signal_SIGNAL_UNSPECIFIED Signal = 0 + Signal_SIGNAL_SIGTERM Signal = 15 + Signal_SIGNAL_SIGKILL Signal = 9 +) + +// Enum value maps for Signal. 
+var ( + Signal_name = map[int32]string{ + 0: "SIGNAL_UNSPECIFIED", + 15: "SIGNAL_SIGTERM", + 9: "SIGNAL_SIGKILL", + } + Signal_value = map[string]int32{ + "SIGNAL_UNSPECIFIED": 0, + "SIGNAL_SIGTERM": 15, + "SIGNAL_SIGKILL": 9, + } +) + +func (x Signal) Enum() *Signal { + p := new(Signal) + *p = x + return p +} + +func (x Signal) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Signal) Descriptor() protoreflect.EnumDescriptor { + return file_process_process_proto_enumTypes[0].Descriptor() +} + +func (Signal) Type() protoreflect.EnumType { + return &file_process_process_proto_enumTypes[0] +} + +func (x Signal) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Signal.Descriptor instead. +func (Signal) EnumDescriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{0} +} + +type PTY struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Size *PTY_Size `protobuf:"bytes,1,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *PTY) Reset() { + *x = PTY{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PTY) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PTY) ProtoMessage() {} + +func (x *PTY) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PTY.ProtoReflect.Descriptor instead. 
+func (*PTY) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{0} +} + +func (x *PTY) GetSize() *PTY_Size { + if x != nil { + return x.Size + } + return nil +} + +type ProcessConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cmd string `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"` + Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"` + Envs map[string]string `protobuf:"bytes,3,rep,name=envs,proto3" json:"envs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Cwd *string `protobuf:"bytes,4,opt,name=cwd,proto3,oneof" json:"cwd,omitempty"` +} + +func (x *ProcessConfig) Reset() { + *x = ProcessConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessConfig) ProtoMessage() {} + +func (x *ProcessConfig) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessConfig.ProtoReflect.Descriptor instead. 
+func (*ProcessConfig) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{1} +} + +func (x *ProcessConfig) GetCmd() string { + if x != nil { + return x.Cmd + } + return "" +} + +func (x *ProcessConfig) GetArgs() []string { + if x != nil { + return x.Args + } + return nil +} + +func (x *ProcessConfig) GetEnvs() map[string]string { + if x != nil { + return x.Envs + } + return nil +} + +func (x *ProcessConfig) GetCwd() string { + if x != nil && x.Cwd != nil { + return *x.Cwd + } + return "" +} + +type ListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ListRequest) Reset() { + *x = ListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListRequest) ProtoMessage() {} + +func (x *ListRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. 
+func (*ListRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{2} +} + +type ProcessInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *ProcessConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` +} + +func (x *ProcessInfo) Reset() { + *x = ProcessInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessInfo) ProtoMessage() {} + +func (x *ProcessInfo) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. 
+func (*ProcessInfo) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{3} +} + +func (x *ProcessInfo) GetConfig() *ProcessConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *ProcessInfo) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ProcessInfo) GetTag() string { + if x != nil && x.Tag != nil { + return *x.Tag + } + return "" +} + +type ListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processes []*ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` +} + +func (x *ListResponse) Reset() { + *x = ListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListResponse) ProtoMessage() {} + +func (x *ListResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. 
+func (*ListResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{4} +} + +func (x *ListResponse) GetProcesses() []*ProcessInfo { + if x != nil { + return x.Processes + } + return nil +} + +type StartRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessConfig `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` + Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` + Tag *string `protobuf:"bytes,3,opt,name=tag,proto3,oneof" json:"tag,omitempty"` +} + +func (x *StartRequest) Reset() { + *x = StartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartRequest) ProtoMessage() {} + +func (x *StartRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. 
+func (*StartRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{5} +} + +func (x *StartRequest) GetProcess() *ProcessConfig { + if x != nil { + return x.Process + } + return nil +} + +func (x *StartRequest) GetPty() *PTY { + if x != nil { + return x.Pty + } + return nil +} + +func (x *StartRequest) GetTag() string { + if x != nil && x.Tag != nil { + return *x.Tag + } + return "" +} + +type UpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` + Pty *PTY `protobuf:"bytes,2,opt,name=pty,proto3,oneof" json:"pty,omitempty"` +} + +func (x *UpdateRequest) Reset() { + *x = UpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateRequest) ProtoMessage() {} + +func (x *UpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{6} +} + +func (x *UpdateRequest) GetProcess() *ProcessSelector { + if x != nil { + return x.Process + } + return nil +} + +func (x *UpdateRequest) GetPty() *PTY { + if x != nil { + return x.Pty + } + return nil +} + +type UpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateResponse) Reset() { + *x = UpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateResponse) ProtoMessage() {} + +func (x *UpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{7} +} + +type ProcessEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Event: + // + // *ProcessEvent_Start + // *ProcessEvent_Data + // *ProcessEvent_End + // *ProcessEvent_Keepalive + Event isProcessEvent_Event `protobuf_oneof:"event"` +} + +func (x *ProcessEvent) Reset() { + *x = ProcessEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEvent) ProtoMessage() {} + +func (x *ProcessEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEvent.ProtoReflect.Descriptor instead. 
+func (*ProcessEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{8} +} + +func (m *ProcessEvent) GetEvent() isProcessEvent_Event { + if m != nil { + return m.Event + } + return nil +} + +func (x *ProcessEvent) GetStart() *ProcessEvent_StartEvent { + if x, ok := x.GetEvent().(*ProcessEvent_Start); ok { + return x.Start + } + return nil +} + +func (x *ProcessEvent) GetData() *ProcessEvent_DataEvent { + if x, ok := x.GetEvent().(*ProcessEvent_Data); ok { + return x.Data + } + return nil +} + +func (x *ProcessEvent) GetEnd() *ProcessEvent_EndEvent { + if x, ok := x.GetEvent().(*ProcessEvent_End); ok { + return x.End + } + return nil +} + +func (x *ProcessEvent) GetKeepalive() *ProcessEvent_KeepAlive { + if x, ok := x.GetEvent().(*ProcessEvent_Keepalive); ok { + return x.Keepalive + } + return nil +} + +type isProcessEvent_Event interface { + isProcessEvent_Event() +} + +type ProcessEvent_Start struct { + Start *ProcessEvent_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` +} + +type ProcessEvent_Data struct { + Data *ProcessEvent_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` +} + +type ProcessEvent_End struct { + End *ProcessEvent_EndEvent `protobuf:"bytes,3,opt,name=end,proto3,oneof"` +} + +type ProcessEvent_Keepalive struct { + Keepalive *ProcessEvent_KeepAlive `protobuf:"bytes,4,opt,name=keepalive,proto3,oneof"` +} + +func (*ProcessEvent_Start) isProcessEvent_Event() {} + +func (*ProcessEvent_Data) isProcessEvent_Event() {} + +func (*ProcessEvent_End) isProcessEvent_Event() {} + +func (*ProcessEvent_Keepalive) isProcessEvent_Event() {} + +type StartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *StartResponse) Reset() { + *x = StartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[9] + ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResponse) ProtoMessage() {} + +func (x *StartResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. +func (*StartResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{9} +} + +func (x *StartResponse) GetEvent() *ProcessEvent { + if x != nil { + return x.Event + } + return nil +} + +type ConnectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event *ProcessEvent `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *ConnectResponse) Reset() { + *x = ConnectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectResponse) ProtoMessage() {} + +func (x *ConnectResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectResponse.ProtoReflect.Descriptor instead. 
+func (*ConnectResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{10} +} + +func (x *ConnectResponse) GetEvent() *ProcessEvent { + if x != nil { + return x.Event + } + return nil +} + +type SendInputRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` + Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` +} + +func (x *SendInputRequest) Reset() { + *x = SendInputRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendInputRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendInputRequest) ProtoMessage() {} + +func (x *SendInputRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendInputRequest.ProtoReflect.Descriptor instead. 
+func (*SendInputRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{11} +} + +func (x *SendInputRequest) GetProcess() *ProcessSelector { + if x != nil { + return x.Process + } + return nil +} + +func (x *SendInputRequest) GetInput() *ProcessInput { + if x != nil { + return x.Input + } + return nil +} + +type SendInputResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SendInputResponse) Reset() { + *x = SendInputResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendInputResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendInputResponse) ProtoMessage() {} + +func (x *SendInputResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendInputResponse.ProtoReflect.Descriptor instead. 
+func (*SendInputResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{12} +} + +type ProcessInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Input: + // + // *ProcessInput_Stdin + // *ProcessInput_Pty + Input isProcessInput_Input `protobuf_oneof:"input"` +} + +func (x *ProcessInput) Reset() { + *x = ProcessInput{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessInput) ProtoMessage() {} + +func (x *ProcessInput) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessInput.ProtoReflect.Descriptor instead. 
+func (*ProcessInput) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{13} +} + +func (m *ProcessInput) GetInput() isProcessInput_Input { + if m != nil { + return m.Input + } + return nil +} + +func (x *ProcessInput) GetStdin() []byte { + if x, ok := x.GetInput().(*ProcessInput_Stdin); ok { + return x.Stdin + } + return nil +} + +func (x *ProcessInput) GetPty() []byte { + if x, ok := x.GetInput().(*ProcessInput_Pty); ok { + return x.Pty + } + return nil +} + +type isProcessInput_Input interface { + isProcessInput_Input() +} + +type ProcessInput_Stdin struct { + Stdin []byte `protobuf:"bytes,1,opt,name=stdin,proto3,oneof"` +} + +type ProcessInput_Pty struct { + Pty []byte `protobuf:"bytes,2,opt,name=pty,proto3,oneof"` +} + +func (*ProcessInput_Stdin) isProcessInput_Input() {} + +func (*ProcessInput_Pty) isProcessInput_Input() {} + +type StreamInputRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Event: + // + // *StreamInputRequest_Start + // *StreamInputRequest_Data + // *StreamInputRequest_Keepalive + Event isStreamInputRequest_Event `protobuf_oneof:"event"` +} + +func (x *StreamInputRequest) Reset() { + *x = StreamInputRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInputRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInputRequest) ProtoMessage() {} + +func (x *StreamInputRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInputRequest.ProtoReflect.Descriptor 
instead. +func (*StreamInputRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{14} +} + +func (m *StreamInputRequest) GetEvent() isStreamInputRequest_Event { + if m != nil { + return m.Event + } + return nil +} + +func (x *StreamInputRequest) GetStart() *StreamInputRequest_StartEvent { + if x, ok := x.GetEvent().(*StreamInputRequest_Start); ok { + return x.Start + } + return nil +} + +func (x *StreamInputRequest) GetData() *StreamInputRequest_DataEvent { + if x, ok := x.GetEvent().(*StreamInputRequest_Data); ok { + return x.Data + } + return nil +} + +func (x *StreamInputRequest) GetKeepalive() *StreamInputRequest_KeepAlive { + if x, ok := x.GetEvent().(*StreamInputRequest_Keepalive); ok { + return x.Keepalive + } + return nil +} + +type isStreamInputRequest_Event interface { + isStreamInputRequest_Event() +} + +type StreamInputRequest_Start struct { + Start *StreamInputRequest_StartEvent `protobuf:"bytes,1,opt,name=start,proto3,oneof"` +} + +type StreamInputRequest_Data struct { + Data *StreamInputRequest_DataEvent `protobuf:"bytes,2,opt,name=data,proto3,oneof"` +} + +type StreamInputRequest_Keepalive struct { + Keepalive *StreamInputRequest_KeepAlive `protobuf:"bytes,3,opt,name=keepalive,proto3,oneof"` +} + +func (*StreamInputRequest_Start) isStreamInputRequest_Event() {} + +func (*StreamInputRequest_Data) isStreamInputRequest_Event() {} + +func (*StreamInputRequest_Keepalive) isStreamInputRequest_Event() {} + +type StreamInputResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StreamInputResponse) Reset() { + *x = StreamInputResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInputResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInputResponse) 
ProtoMessage() {} + +func (x *StreamInputResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInputResponse.ProtoReflect.Descriptor instead. +func (*StreamInputResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{15} +} + +type SendSignalRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` + Signal Signal `protobuf:"varint,2,opt,name=signal,proto3,enum=process.Signal" json:"signal,omitempty"` +} + +func (x *SendSignalRequest) Reset() { + *x = SendSignalRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendSignalRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendSignalRequest) ProtoMessage() {} + +func (x *SendSignalRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendSignalRequest.ProtoReflect.Descriptor instead. 
+func (*SendSignalRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{16} +} + +func (x *SendSignalRequest) GetProcess() *ProcessSelector { + if x != nil { + return x.Process + } + return nil +} + +func (x *SendSignalRequest) GetSignal() Signal { + if x != nil { + return x.Signal + } + return Signal_SIGNAL_UNSPECIFIED +} + +type SendSignalResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SendSignalResponse) Reset() { + *x = SendSignalResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendSignalResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendSignalResponse) ProtoMessage() {} + +func (x *SendSignalResponse) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendSignalResponse.ProtoReflect.Descriptor instead. 
+func (*SendSignalResponse) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{17} +} + +type ConnectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` +} + +func (x *ConnectRequest) Reset() { + *x = ConnectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectRequest) ProtoMessage() {} + +func (x *ConnectRequest) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectRequest.ProtoReflect.Descriptor instead. 
+func (*ConnectRequest) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{18} +} + +func (x *ConnectRequest) GetProcess() *ProcessSelector { + if x != nil { + return x.Process + } + return nil +} + +type ProcessSelector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Selector: + // + // *ProcessSelector_Pid + // *ProcessSelector_Tag + Selector isProcessSelector_Selector `protobuf_oneof:"selector"` +} + +func (x *ProcessSelector) Reset() { + *x = ProcessSelector{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessSelector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessSelector) ProtoMessage() {} + +func (x *ProcessSelector) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessSelector.ProtoReflect.Descriptor instead. 
+func (*ProcessSelector) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{19} +} + +func (m *ProcessSelector) GetSelector() isProcessSelector_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (x *ProcessSelector) GetPid() uint32 { + if x, ok := x.GetSelector().(*ProcessSelector_Pid); ok { + return x.Pid + } + return 0 +} + +func (x *ProcessSelector) GetTag() string { + if x, ok := x.GetSelector().(*ProcessSelector_Tag); ok { + return x.Tag + } + return "" +} + +type isProcessSelector_Selector interface { + isProcessSelector_Selector() +} + +type ProcessSelector_Pid struct { + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3,oneof"` +} + +type ProcessSelector_Tag struct { + Tag string `protobuf:"bytes,2,opt,name=tag,proto3,oneof"` +} + +func (*ProcessSelector_Pid) isProcessSelector_Selector() {} + +func (*ProcessSelector_Tag) isProcessSelector_Selector() {} + +type PTY_Size struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cols uint32 `protobuf:"varint,1,opt,name=cols,proto3" json:"cols,omitempty"` + Rows uint32 `protobuf:"varint,2,opt,name=rows,proto3" json:"rows,omitempty"` +} + +func (x *PTY_Size) Reset() { + *x = PTY_Size{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PTY_Size) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PTY_Size) ProtoMessage() {} + +func (x *PTY_Size) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PTY_Size.ProtoReflect.Descriptor instead. 
+func (*PTY_Size) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *PTY_Size) GetCols() uint32 { + if x != nil { + return x.Cols + } + return 0 +} + +func (x *PTY_Size) GetRows() uint32 { + if x != nil { + return x.Rows + } + return 0 +} + +type ProcessEvent_StartEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *ProcessEvent_StartEvent) Reset() { + *x = ProcessEvent_StartEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessEvent_StartEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEvent_StartEvent) ProtoMessage() {} + +func (x *ProcessEvent_StartEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEvent_StartEvent.ProtoReflect.Descriptor instead. 
+func (*ProcessEvent_StartEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *ProcessEvent_StartEvent) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +type ProcessEvent_DataEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Output: + // + // *ProcessEvent_DataEvent_Stdout + // *ProcessEvent_DataEvent_Stderr + // *ProcessEvent_DataEvent_Pty + Output isProcessEvent_DataEvent_Output `protobuf_oneof:"output"` +} + +func (x *ProcessEvent_DataEvent) Reset() { + *x = ProcessEvent_DataEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessEvent_DataEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEvent_DataEvent) ProtoMessage() {} + +func (x *ProcessEvent_DataEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEvent_DataEvent.ProtoReflect.Descriptor instead. 
+func (*ProcessEvent_DataEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{8, 1} +} + +func (m *ProcessEvent_DataEvent) GetOutput() isProcessEvent_DataEvent_Output { + if m != nil { + return m.Output + } + return nil +} + +func (x *ProcessEvent_DataEvent) GetStdout() []byte { + if x, ok := x.GetOutput().(*ProcessEvent_DataEvent_Stdout); ok { + return x.Stdout + } + return nil +} + +func (x *ProcessEvent_DataEvent) GetStderr() []byte { + if x, ok := x.GetOutput().(*ProcessEvent_DataEvent_Stderr); ok { + return x.Stderr + } + return nil +} + +func (x *ProcessEvent_DataEvent) GetPty() []byte { + if x, ok := x.GetOutput().(*ProcessEvent_DataEvent_Pty); ok { + return x.Pty + } + return nil +} + +type isProcessEvent_DataEvent_Output interface { + isProcessEvent_DataEvent_Output() +} + +type ProcessEvent_DataEvent_Stdout struct { + Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3,oneof"` +} + +type ProcessEvent_DataEvent_Stderr struct { + Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3,oneof"` +} + +type ProcessEvent_DataEvent_Pty struct { + Pty []byte `protobuf:"bytes,3,opt,name=pty,proto3,oneof"` +} + +func (*ProcessEvent_DataEvent_Stdout) isProcessEvent_DataEvent_Output() {} + +func (*ProcessEvent_DataEvent_Stderr) isProcessEvent_DataEvent_Output() {} + +func (*ProcessEvent_DataEvent_Pty) isProcessEvent_DataEvent_Output() {} + +type ProcessEvent_EndEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitCode int32 `protobuf:"zigzag32,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + Exited bool `protobuf:"varint,2,opt,name=exited,proto3" json:"exited,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + Error *string `protobuf:"bytes,4,opt,name=error,proto3,oneof" json:"error,omitempty"` +} + +func (x *ProcessEvent_EndEvent) Reset() { + *x = ProcessEvent_EndEvent{} + if 
protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessEvent_EndEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEvent_EndEvent) ProtoMessage() {} + +func (x *ProcessEvent_EndEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEvent_EndEvent.ProtoReflect.Descriptor instead. +func (*ProcessEvent_EndEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *ProcessEvent_EndEvent) GetExitCode() int32 { + if x != nil { + return x.ExitCode + } + return 0 +} + +func (x *ProcessEvent_EndEvent) GetExited() bool { + if x != nil { + return x.Exited + } + return false +} + +func (x *ProcessEvent_EndEvent) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *ProcessEvent_EndEvent) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +type ProcessEvent_KeepAlive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ProcessEvent_KeepAlive) Reset() { + *x = ProcessEvent_KeepAlive{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProcessEvent_KeepAlive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessEvent_KeepAlive) ProtoMessage() {} + +func (x *ProcessEvent_KeepAlive) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[25] + if protoimpl.UnsafeEnabled 
&& x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessEvent_KeepAlive.ProtoReflect.Descriptor instead. +func (*ProcessEvent_KeepAlive) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{8, 3} +} + +type StreamInputRequest_StartEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *ProcessSelector `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` +} + +func (x *StreamInputRequest_StartEvent) Reset() { + *x = StreamInputRequest_StartEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInputRequest_StartEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInputRequest_StartEvent) ProtoMessage() {} + +func (x *StreamInputRequest_StartEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInputRequest_StartEvent.ProtoReflect.Descriptor instead. 
+func (*StreamInputRequest_StartEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{14, 0} +} + +func (x *StreamInputRequest_StartEvent) GetProcess() *ProcessSelector { + if x != nil { + return x.Process + } + return nil +} + +type StreamInputRequest_DataEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *ProcessInput `protobuf:"bytes,2,opt,name=input,proto3" json:"input,omitempty"` +} + +func (x *StreamInputRequest_DataEvent) Reset() { + *x = StreamInputRequest_DataEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInputRequest_DataEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInputRequest_DataEvent) ProtoMessage() {} + +func (x *StreamInputRequest_DataEvent) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInputRequest_DataEvent.ProtoReflect.Descriptor instead. 
+func (*StreamInputRequest_DataEvent) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{14, 1} +} + +func (x *StreamInputRequest_DataEvent) GetInput() *ProcessInput { + if x != nil { + return x.Input + } + return nil +} + +type StreamInputRequest_KeepAlive struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StreamInputRequest_KeepAlive) Reset() { + *x = StreamInputRequest_KeepAlive{} + if protoimpl.UnsafeEnabled { + mi := &file_process_process_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInputRequest_KeepAlive) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInputRequest_KeepAlive) ProtoMessage() {} + +func (x *StreamInputRequest_KeepAlive) ProtoReflect() protoreflect.Message { + mi := &file_process_process_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInputRequest_KeepAlive.ProtoReflect.Descriptor instead. 
+func (*StreamInputRequest_KeepAlive) Descriptor() ([]byte, []int) { + return file_process_process_proto_rawDescGZIP(), []int{14, 2} +} + +var File_process_process_proto protoreflect.FileDescriptor + +var file_process_process_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x22, 0x5c, 0x0a, 0x03, 0x50, 0x54, 0x59, 0x12, 0x25, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, + 0x50, 0x54, 0x59, 0x2e, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x1a, 0x2e, + 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, + 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x22, 0xc3, + 0x01, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x10, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, + 0x6d, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x65, 0x6e, 0x76, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x76, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x76, 0x73, 0x12, 0x15, 0x0a, 0x03, + 0x63, 0x77, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x63, 0x77, 0x64, + 0x88, 0x01, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x06, 0x0a, 0x04, + 0x5f, 0x63, 0x77, 0x64, 0x22, 0x0d, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x6e, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x70, 0x69, 0x64, 0x12, 0x15, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x03, 0x74, 0x61, 0x67, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, + 0x74, 0x61, 0x67, 0x22, 0x42, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x03, 0x70, 0x74, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x2e, 0x50, 
0x54, 0x59, 0x48, 0x00, 0x52, 0x03, 0x70, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, + 0x15, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x03, + 0x74, 0x61, 0x67, 0x88, 0x01, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x70, 0x74, 0x79, 0x42, 0x06, + 0x0a, 0x04, 0x5f, 0x74, 0x61, 0x67, 0x22, 0x70, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x03, 0x70, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x2e, 0x50, 0x54, 0x59, 0x48, 0x00, 0x52, 0x03, 0x70, 0x74, 0x79, 0x88, 0x01, 0x01, + 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x70, 0x74, 0x79, 0x22, 0x10, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x04, 0x0a, 0x0c, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x35, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x32, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1e, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x45, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x3f, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x41, + 0x6c, 0x69, 0x76, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, + 0x65, 0x1a, 0x1e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, + 0x64, 0x1a, 0x5d, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x18, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, + 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, + 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, + 0x72, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x70, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x03, 0x70, 0x74, 0x79, 0x42, 0x08, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x1a, 0x7c, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x78, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, + 0x08, 0x65, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, + 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x74, 0x65, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 
0x72, + 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x0b, + 0x0a, 0x09, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x22, 0x3c, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x22, 0x73, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x53, 0x65, 0x6e, 0x64, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x0a, 0x0c, + 0x50, 0x72, 0x6f, 0x63, 0x65, 
0x73, 0x73, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x05, + 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x05, 0x73, + 0x74, 0x64, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x03, 0x70, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x03, 0x70, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x22, 0xea, 0x02, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, + 0x00, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x45, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, + 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x1a, 0x40, 0x0a, 0x0a, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 
0x73, 0x73, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x1a, 0x38, + 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x1a, 0x0b, 0x0a, 0x09, 0x4b, 0x65, 0x65, 0x70, + 0x41, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x15, + 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x70, 0x0a, 0x11, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x69, 0x67, + 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x27, + 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, + 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, + 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x65, 0x6e, 0x64, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x0a, + 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x32, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, + 0x65, 
0x73, 0x73, 0x22, 0x45, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x03, 0x74, 0x61, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x74, 0x61, 0x67, 0x42, 0x0a, + 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2a, 0x48, 0x0a, 0x06, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, + 0x53, 0x49, 0x47, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x54, 0x45, 0x52, 0x4d, 0x10, 0x0f, + 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4b, 0x49, + 0x4c, 0x4c, 0x10, 0x09, 0x32, 0xca, 0x03, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x12, 0x33, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, + 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x12, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x38, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x15, + 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, + 0x39, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x53, 0x65, + 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, + 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x53, 0x65, + 0x6e, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x2e, 0x53, + 0x65, 0x6e, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x97, 0x01, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x42, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 
0x2f, 0x65, 0x32, + 0x62, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x73, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x6e, 0x76, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0xa2, 0x02, 0x03, 0x50, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0xca, 0x02, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0xe2, 0x02, 0x13, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0xea, 0x02, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_process_process_proto_rawDescOnce sync.Once + file_process_process_proto_rawDescData = file_process_process_proto_rawDesc +) + +func file_process_process_proto_rawDescGZIP() []byte { + file_process_process_proto_rawDescOnce.Do(func() { + file_process_process_proto_rawDescData = protoimpl.X.CompressGZIP(file_process_process_proto_rawDescData) + }) + return file_process_process_proto_rawDescData +} + +var file_process_process_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_process_process_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_process_process_proto_goTypes = []interface{}{ + (Signal)(0), // 0: process.Signal + (*PTY)(nil), // 1: process.PTY + (*ProcessConfig)(nil), // 2: process.ProcessConfig + (*ListRequest)(nil), // 3: process.ListRequest + (*ProcessInfo)(nil), // 4: process.ProcessInfo + (*ListResponse)(nil), // 5: process.ListResponse + (*StartRequest)(nil), // 6: process.StartRequest + (*UpdateRequest)(nil), // 7: process.UpdateRequest + (*UpdateResponse)(nil), // 8: process.UpdateResponse + (*ProcessEvent)(nil), // 9: process.ProcessEvent + (*StartResponse)(nil), // 10: process.StartResponse + (*ConnectResponse)(nil), // 11: process.ConnectResponse + (*SendInputRequest)(nil), // 12: 
process.SendInputRequest + (*SendInputResponse)(nil), // 13: process.SendInputResponse + (*ProcessInput)(nil), // 14: process.ProcessInput + (*StreamInputRequest)(nil), // 15: process.StreamInputRequest + (*StreamInputResponse)(nil), // 16: process.StreamInputResponse + (*SendSignalRequest)(nil), // 17: process.SendSignalRequest + (*SendSignalResponse)(nil), // 18: process.SendSignalResponse + (*ConnectRequest)(nil), // 19: process.ConnectRequest + (*ProcessSelector)(nil), // 20: process.ProcessSelector + (*PTY_Size)(nil), // 21: process.PTY.Size + nil, // 22: process.ProcessConfig.EnvsEntry + (*ProcessEvent_StartEvent)(nil), // 23: process.ProcessEvent.StartEvent + (*ProcessEvent_DataEvent)(nil), // 24: process.ProcessEvent.DataEvent + (*ProcessEvent_EndEvent)(nil), // 25: process.ProcessEvent.EndEvent + (*ProcessEvent_KeepAlive)(nil), // 26: process.ProcessEvent.KeepAlive + (*StreamInputRequest_StartEvent)(nil), // 27: process.StreamInputRequest.StartEvent + (*StreamInputRequest_DataEvent)(nil), // 28: process.StreamInputRequest.DataEvent + (*StreamInputRequest_KeepAlive)(nil), // 29: process.StreamInputRequest.KeepAlive +} +var file_process_process_proto_depIdxs = []int32{ + 21, // 0: process.PTY.size:type_name -> process.PTY.Size + 22, // 1: process.ProcessConfig.envs:type_name -> process.ProcessConfig.EnvsEntry + 2, // 2: process.ProcessInfo.config:type_name -> process.ProcessConfig + 4, // 3: process.ListResponse.processes:type_name -> process.ProcessInfo + 2, // 4: process.StartRequest.process:type_name -> process.ProcessConfig + 1, // 5: process.StartRequest.pty:type_name -> process.PTY + 20, // 6: process.UpdateRequest.process:type_name -> process.ProcessSelector + 1, // 7: process.UpdateRequest.pty:type_name -> process.PTY + 23, // 8: process.ProcessEvent.start:type_name -> process.ProcessEvent.StartEvent + 24, // 9: process.ProcessEvent.data:type_name -> process.ProcessEvent.DataEvent + 25, // 10: process.ProcessEvent.end:type_name -> 
process.ProcessEvent.EndEvent + 26, // 11: process.ProcessEvent.keepalive:type_name -> process.ProcessEvent.KeepAlive + 9, // 12: process.StartResponse.event:type_name -> process.ProcessEvent + 9, // 13: process.ConnectResponse.event:type_name -> process.ProcessEvent + 20, // 14: process.SendInputRequest.process:type_name -> process.ProcessSelector + 14, // 15: process.SendInputRequest.input:type_name -> process.ProcessInput + 27, // 16: process.StreamInputRequest.start:type_name -> process.StreamInputRequest.StartEvent + 28, // 17: process.StreamInputRequest.data:type_name -> process.StreamInputRequest.DataEvent + 29, // 18: process.StreamInputRequest.keepalive:type_name -> process.StreamInputRequest.KeepAlive + 20, // 19: process.SendSignalRequest.process:type_name -> process.ProcessSelector + 0, // 20: process.SendSignalRequest.signal:type_name -> process.Signal + 20, // 21: process.ConnectRequest.process:type_name -> process.ProcessSelector + 20, // 22: process.StreamInputRequest.StartEvent.process:type_name -> process.ProcessSelector + 14, // 23: process.StreamInputRequest.DataEvent.input:type_name -> process.ProcessInput + 3, // 24: process.Process.List:input_type -> process.ListRequest + 19, // 25: process.Process.Connect:input_type -> process.ConnectRequest + 6, // 26: process.Process.Start:input_type -> process.StartRequest + 7, // 27: process.Process.Update:input_type -> process.UpdateRequest + 15, // 28: process.Process.StreamInput:input_type -> process.StreamInputRequest + 12, // 29: process.Process.SendInput:input_type -> process.SendInputRequest + 17, // 30: process.Process.SendSignal:input_type -> process.SendSignalRequest + 5, // 31: process.Process.List:output_type -> process.ListResponse + 11, // 32: process.Process.Connect:output_type -> process.ConnectResponse + 10, // 33: process.Process.Start:output_type -> process.StartResponse + 8, // 34: process.Process.Update:output_type -> process.UpdateResponse + 16, // 35: 
process.Process.StreamInput:output_type -> process.StreamInputResponse + 13, // 36: process.Process.SendInput:output_type -> process.SendInputResponse + 18, // 37: process.Process.SendSignal:output_type -> process.SendSignalResponse + 31, // [31:38] is the sub-list for method output_type + 24, // [24:31] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_process_process_proto_init() } +func file_process_process_proto_init() { + if File_process_process_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_process_process_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PTY); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_process_process_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendInputRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendInputResponse); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInputRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInputResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendSignalRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendSignalResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessSelector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[20].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*PTY_Size); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessEvent_StartEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessEvent_DataEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessEvent_EndEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessEvent_KeepAlive); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInputRequest_StartEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInputRequest_DataEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_process_process_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInputRequest_KeepAlive); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_process_process_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_process_process_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_process_process_proto_msgTypes[5].OneofWrappers = []interface{}{} + file_process_process_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_process_process_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*ProcessEvent_Start)(nil), + (*ProcessEvent_Data)(nil), + (*ProcessEvent_End)(nil), + (*ProcessEvent_Keepalive)(nil), + } + file_process_process_proto_msgTypes[13].OneofWrappers = []interface{}{ + (*ProcessInput_Stdin)(nil), + (*ProcessInput_Pty)(nil), + } + file_process_process_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*StreamInputRequest_Start)(nil), + (*StreamInputRequest_Data)(nil), + (*StreamInputRequest_Keepalive)(nil), + } + file_process_process_proto_msgTypes[19].OneofWrappers = []interface{}{ + (*ProcessSelector_Pid)(nil), + (*ProcessSelector_Tag)(nil), + } + file_process_process_proto_msgTypes[23].OneofWrappers = []interface{}{ + (*ProcessEvent_DataEvent_Stdout)(nil), + (*ProcessEvent_DataEvent_Stderr)(nil), + (*ProcessEvent_DataEvent_Pty)(nil), + } + file_process_process_proto_msgTypes[24].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_process_process_proto_rawDesc, + NumEnums: 1, + NumMessages: 29, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_process_process_proto_goTypes, + DependencyIndexes: file_process_process_proto_depIdxs, + EnumInfos: file_process_process_proto_enumTypes, + MessageInfos: file_process_process_proto_msgTypes, + }.Build() + File_process_process_proto = out.File + file_process_process_proto_rawDesc = nil + file_process_process_proto_goTypes = nil + file_process_process_proto_depIdxs = nil +} diff --git 
a/packages/shared/pkg/grpc/envd/process/processconnect/process.connect.go b/packages/shared/pkg/grpc/envd/process/processconnect/process.connect.go new file mode 100644 index 0000000..7feec8e --- /dev/null +++ b/packages/shared/pkg/grpc/envd/process/processconnect/process.connect.go @@ -0,0 +1,278 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: process/process.proto + +package processconnect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + process "github.com/e2b-dev/infra/packages/shared/pkg/grpc/envd/process" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // ProcessName is the fully-qualified name of the Process service. + ProcessName = "process.Process" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // ProcessListProcedure is the fully-qualified name of the Process's List RPC. + ProcessListProcedure = "/process.Process/List" + // ProcessConnectProcedure is the fully-qualified name of the Process's Connect RPC. 
+ ProcessConnectProcedure = "/process.Process/Connect" + // ProcessStartProcedure is the fully-qualified name of the Process's Start RPC. + ProcessStartProcedure = "/process.Process/Start" + // ProcessUpdateProcedure is the fully-qualified name of the Process's Update RPC. + ProcessUpdateProcedure = "/process.Process/Update" + // ProcessStreamInputProcedure is the fully-qualified name of the Process's StreamInput RPC. + ProcessStreamInputProcedure = "/process.Process/StreamInput" + // ProcessSendInputProcedure is the fully-qualified name of the Process's SendInput RPC. + ProcessSendInputProcedure = "/process.Process/SendInput" + // ProcessSendSignalProcedure is the fully-qualified name of the Process's SendSignal RPC. + ProcessSendSignalProcedure = "/process.Process/SendSignal" +) + +// ProcessClient is a client for the process.Process service. +type ProcessClient interface { + List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) + Connect(context.Context, *connect.Request[process.ConnectRequest]) (*connect.ServerStreamForClient[process.ConnectResponse], error) + Start(context.Context, *connect.Request[process.StartRequest]) (*connect.ServerStreamForClient[process.StartResponse], error) + Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) + // Client input stream ensures ordering of messages + StreamInput(context.Context) *connect.ClientStreamForClient[process.StreamInputRequest, process.StreamInputResponse] + SendInput(context.Context, *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) + SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) +} + +// NewProcessClient constructs a client for the process.Process service. 
By default, it uses the +// Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends +// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewProcessClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) ProcessClient { + baseURL = strings.TrimRight(baseURL, "/") + processMethods := process.File_process_process_proto.Services().ByName("Process").Methods() + return &processClient{ + list: connect.NewClient[process.ListRequest, process.ListResponse]( + httpClient, + baseURL+ProcessListProcedure, + connect.WithSchema(processMethods.ByName("List")), + connect.WithClientOptions(opts...), + ), + connect: connect.NewClient[process.ConnectRequest, process.ConnectResponse]( + httpClient, + baseURL+ProcessConnectProcedure, + connect.WithSchema(processMethods.ByName("Connect")), + connect.WithClientOptions(opts...), + ), + start: connect.NewClient[process.StartRequest, process.StartResponse]( + httpClient, + baseURL+ProcessStartProcedure, + connect.WithSchema(processMethods.ByName("Start")), + connect.WithClientOptions(opts...), + ), + update: connect.NewClient[process.UpdateRequest, process.UpdateResponse]( + httpClient, + baseURL+ProcessUpdateProcedure, + connect.WithSchema(processMethods.ByName("Update")), + connect.WithClientOptions(opts...), + ), + streamInput: connect.NewClient[process.StreamInputRequest, process.StreamInputResponse]( + httpClient, + baseURL+ProcessStreamInputProcedure, + connect.WithSchema(processMethods.ByName("StreamInput")), + connect.WithClientOptions(opts...), + ), + sendInput: connect.NewClient[process.SendInputRequest, process.SendInputResponse]( + httpClient, + baseURL+ProcessSendInputProcedure, + connect.WithSchema(processMethods.ByName("SendInput")), + 
connect.WithClientOptions(opts...), + ), + sendSignal: connect.NewClient[process.SendSignalRequest, process.SendSignalResponse]( + httpClient, + baseURL+ProcessSendSignalProcedure, + connect.WithSchema(processMethods.ByName("SendSignal")), + connect.WithClientOptions(opts...), + ), + } +} + +// processClient implements ProcessClient. +type processClient struct { + list *connect.Client[process.ListRequest, process.ListResponse] + connect *connect.Client[process.ConnectRequest, process.ConnectResponse] + start *connect.Client[process.StartRequest, process.StartResponse] + update *connect.Client[process.UpdateRequest, process.UpdateResponse] + streamInput *connect.Client[process.StreamInputRequest, process.StreamInputResponse] + sendInput *connect.Client[process.SendInputRequest, process.SendInputResponse] + sendSignal *connect.Client[process.SendSignalRequest, process.SendSignalResponse] +} + +// List calls process.Process.List. +func (c *processClient) List(ctx context.Context, req *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) { + return c.list.CallUnary(ctx, req) +} + +// Connect calls process.Process.Connect. +func (c *processClient) Connect(ctx context.Context, req *connect.Request[process.ConnectRequest]) (*connect.ServerStreamForClient[process.ConnectResponse], error) { + return c.connect.CallServerStream(ctx, req) +} + +// Start calls process.Process.Start. +func (c *processClient) Start(ctx context.Context, req *connect.Request[process.StartRequest]) (*connect.ServerStreamForClient[process.StartResponse], error) { + return c.start.CallServerStream(ctx, req) +} + +// Update calls process.Process.Update. +func (c *processClient) Update(ctx context.Context, req *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) { + return c.update.CallUnary(ctx, req) +} + +// StreamInput calls process.Process.StreamInput. 
+func (c *processClient) StreamInput(ctx context.Context) *connect.ClientStreamForClient[process.StreamInputRequest, process.StreamInputResponse] { + return c.streamInput.CallClientStream(ctx) +} + +// SendInput calls process.Process.SendInput. +func (c *processClient) SendInput(ctx context.Context, req *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) { + return c.sendInput.CallUnary(ctx, req) +} + +// SendSignal calls process.Process.SendSignal. +func (c *processClient) SendSignal(ctx context.Context, req *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) { + return c.sendSignal.CallUnary(ctx, req) +} + +// ProcessHandler is an implementation of the process.Process service. +type ProcessHandler interface { + List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) + Connect(context.Context, *connect.Request[process.ConnectRequest], *connect.ServerStream[process.ConnectResponse]) error + Start(context.Context, *connect.Request[process.StartRequest], *connect.ServerStream[process.StartResponse]) error + Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) + // Client input stream ensures ordering of messages + StreamInput(context.Context, *connect.ClientStream[process.StreamInputRequest]) (*connect.Response[process.StreamInputResponse], error) + SendInput(context.Context, *connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) + SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) +} + +// NewProcessHandler builds an HTTP handler from the service implementation. It returns the path on +// which to mount the handler and the handler itself. 
+// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. +func NewProcessHandler(svc ProcessHandler, opts ...connect.HandlerOption) (string, http.Handler) { + processMethods := process.File_process_process_proto.Services().ByName("Process").Methods() + processListHandler := connect.NewUnaryHandler( + ProcessListProcedure, + svc.List, + connect.WithSchema(processMethods.ByName("List")), + connect.WithHandlerOptions(opts...), + ) + processConnectHandler := connect.NewServerStreamHandler( + ProcessConnectProcedure, + svc.Connect, + connect.WithSchema(processMethods.ByName("Connect")), + connect.WithHandlerOptions(opts...), + ) + processStartHandler := connect.NewServerStreamHandler( + ProcessStartProcedure, + svc.Start, + connect.WithSchema(processMethods.ByName("Start")), + connect.WithHandlerOptions(opts...), + ) + processUpdateHandler := connect.NewUnaryHandler( + ProcessUpdateProcedure, + svc.Update, + connect.WithSchema(processMethods.ByName("Update")), + connect.WithHandlerOptions(opts...), + ) + processStreamInputHandler := connect.NewClientStreamHandler( + ProcessStreamInputProcedure, + svc.StreamInput, + connect.WithSchema(processMethods.ByName("StreamInput")), + connect.WithHandlerOptions(opts...), + ) + processSendInputHandler := connect.NewUnaryHandler( + ProcessSendInputProcedure, + svc.SendInput, + connect.WithSchema(processMethods.ByName("SendInput")), + connect.WithHandlerOptions(opts...), + ) + processSendSignalHandler := connect.NewUnaryHandler( + ProcessSendSignalProcedure, + svc.SendSignal, + connect.WithSchema(processMethods.ByName("SendSignal")), + connect.WithHandlerOptions(opts...), + ) + return "/process.Process/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case ProcessListProcedure: + processListHandler.ServeHTTP(w, r) + case ProcessConnectProcedure: + processConnectHandler.ServeHTTP(w, r) + case 
ProcessStartProcedure: + processStartHandler.ServeHTTP(w, r) + case ProcessUpdateProcedure: + processUpdateHandler.ServeHTTP(w, r) + case ProcessStreamInputProcedure: + processStreamInputHandler.ServeHTTP(w, r) + case ProcessSendInputProcedure: + processSendInputHandler.ServeHTTP(w, r) + case ProcessSendSignalProcedure: + processSendSignalHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedProcessHandler returns CodeUnimplemented from all methods. +type UnimplementedProcessHandler struct{} + +func (UnimplementedProcessHandler) List(context.Context, *connect.Request[process.ListRequest]) (*connect.Response[process.ListResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.List is not implemented")) +} + +func (UnimplementedProcessHandler) Connect(context.Context, *connect.Request[process.ConnectRequest], *connect.ServerStream[process.ConnectResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Connect is not implemented")) +} + +func (UnimplementedProcessHandler) Start(context.Context, *connect.Request[process.StartRequest], *connect.ServerStream[process.StartResponse]) error { + return connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Start is not implemented")) +} + +func (UnimplementedProcessHandler) Update(context.Context, *connect.Request[process.UpdateRequest]) (*connect.Response[process.UpdateResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.Update is not implemented")) +} + +func (UnimplementedProcessHandler) StreamInput(context.Context, *connect.ClientStream[process.StreamInputRequest]) (*connect.Response[process.StreamInputResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.StreamInput is not implemented")) +} + +func (UnimplementedProcessHandler) SendInput(context.Context, 
*connect.Request[process.SendInputRequest]) (*connect.Response[process.SendInputResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendInput is not implemented")) +} + +func (UnimplementedProcessHandler) SendSignal(context.Context, *connect.Request[process.SendSignalRequest]) (*connect.Response[process.SendSignalResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("process.Process.SendSignal is not implemented")) +} diff --git a/packages/shared/pkg/grpc/envd_command.go b/packages/shared/pkg/grpc/envd_command.go new file mode 100644 index 0000000..6c78563 --- /dev/null +++ b/packages/shared/pkg/grpc/envd_command.go @@ -0,0 +1,69 @@ +package grpc + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "net/url" + + "connectrpc.com/connect" + + "github.com/e2b-dev/infra/packages/shared/pkg/consts" +) + +func StreamToChannel[Res any](ctx context.Context, stream *connect.ServerStreamForClient[Res]) (<-chan *Res, <-chan error) { + out := make(chan *Res) + errCh := make(chan error, 1) + + go func() { + defer close(out) + defer close(errCh) + + for stream.Receive() { + select { + case <-ctx.Done(): + // Context canceled, exit the goroutine + return + case out <- stream.Msg(): + // Send the message to the channel + } + } + + if err := stream.Err(); err != nil { + errCh <- err + return + } + }() + + return out, errCh +} + +func SetSandboxHeader(header http.Header, hostname string, sandboxID string) error { + domain, err := extractDomain(hostname) + if err != nil { + return fmt.Errorf("failed to extract domain from hostname: %w", err) + } + // Construct the host (--.e2b.app) + host := fmt.Sprintf("%d-%s-00000000.%s", consts.DefaultEnvdServerPort, sandboxID, domain) + + header.Set("Host", host) + + return nil +} + +func SetUserHeader(header http.Header, user string) { + userString := fmt.Sprintf("%s:", user) + userBase64 := base64.StdEncoding.EncodeToString([]byte(userString)) 
+ basic := fmt.Sprintf("Basic %s", userBase64) + header.Set("Authorization", basic) +} + +func extractDomain(input string) (string, error) { + parsedURL, err := url.Parse(input) + if err != nil || parsedURL.Host == "" { + return "", fmt.Errorf("invalid URL: %s", input) + } + + return parsedURL.Hostname(), nil +} diff --git a/packages/shared/pkg/grpc/orchestrator-info/info.pb.go b/packages/shared/pkg/grpc/orchestrator-info/info.pb.go new file mode 100644 index 0000000..b8cf4c3 --- /dev/null +++ b/packages/shared/pkg/grpc/orchestrator-info/info.pb.go @@ -0,0 +1,455 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v5.29.3 +// source: info.proto + +package orchestrator + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// needs to be different from the enumeration in the template manager +type ServiceInfoStatus int32 + +const ( + ServiceInfoStatus_OrchestratorHealthy ServiceInfoStatus = 0 + ServiceInfoStatus_OrchestratorDraining ServiceInfoStatus = 1 + ServiceInfoStatus_OrchestratorUnhealthy ServiceInfoStatus = 2 +) + +// Enum value maps for ServiceInfoStatus. 
+var ( + ServiceInfoStatus_name = map[int32]string{ + 0: "OrchestratorHealthy", + 1: "OrchestratorDraining", + 2: "OrchestratorUnhealthy", + } + ServiceInfoStatus_value = map[string]int32{ + "OrchestratorHealthy": 0, + "OrchestratorDraining": 1, + "OrchestratorUnhealthy": 2, + } +) + +func (x ServiceInfoStatus) Enum() *ServiceInfoStatus { + p := new(ServiceInfoStatus) + *p = x + return p +} + +func (x ServiceInfoStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceInfoStatus) Descriptor() protoreflect.EnumDescriptor { + return file_info_proto_enumTypes[0].Descriptor() +} + +func (ServiceInfoStatus) Type() protoreflect.EnumType { + return &file_info_proto_enumTypes[0] +} + +func (x ServiceInfoStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceInfoStatus.Descriptor instead. +func (ServiceInfoStatus) EnumDescriptor() ([]byte, []int) { + return file_info_proto_rawDescGZIP(), []int{0} +} + +type ServiceInfoRole int32 + +const ( + ServiceInfoRole_TemplateBuilder ServiceInfoRole = 0 + ServiceInfoRole_Orchestrator ServiceInfoRole = 1 +) + +// Enum value maps for ServiceInfoRole. 
+var ( + ServiceInfoRole_name = map[int32]string{ + 0: "TemplateBuilder", + 1: "Orchestrator", + } + ServiceInfoRole_value = map[string]int32{ + "TemplateBuilder": 0, + "Orchestrator": 1, + } +) + +func (x ServiceInfoRole) Enum() *ServiceInfoRole { + p := new(ServiceInfoRole) + *p = x + return p +} + +func (x ServiceInfoRole) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceInfoRole) Descriptor() protoreflect.EnumDescriptor { + return file_info_proto_enumTypes[1].Descriptor() +} + +func (ServiceInfoRole) Type() protoreflect.EnumType { + return &file_info_proto_enumTypes[1] +} + +func (x ServiceInfoRole) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceInfoRole.Descriptor instead. +func (ServiceInfoRole) EnumDescriptor() ([]byte, []int) { + return file_info_proto_rawDescGZIP(), []int{1} +} + +type ServiceInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + ServiceId string `protobuf:"bytes,2,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + ServiceVersion string `protobuf:"bytes,3,opt,name=service_version,json=serviceVersion,proto3" json:"service_version,omitempty"` + ServiceCommit string `protobuf:"bytes,4,opt,name=service_commit,json=serviceCommit,proto3" json:"service_commit,omitempty"` + ServiceStatus ServiceInfoStatus `protobuf:"varint,51,opt,name=service_status,json=serviceStatus,proto3,enum=ServiceInfoStatus" json:"service_status,omitempty"` + ServiceRoles []ServiceInfoRole `protobuf:"varint,52,rep,packed,name=service_roles,json=serviceRoles,proto3,enum=ServiceInfoRole" json:"service_roles,omitempty"` + ServiceStartup *timestamppb.Timestamp `protobuf:"bytes,53,opt,name=service_startup,json=serviceStartup,proto3" json:"service_startup,omitempty"` + 
MetricVcpuUsed int64 `protobuf:"varint,101,opt,name=metric_vcpu_used,json=metricVcpuUsed,proto3" json:"metric_vcpu_used,omitempty"` + MetricMemoryUsedMb int64 `protobuf:"varint,102,opt,name=metric_memory_used_mb,json=metricMemoryUsedMb,proto3" json:"metric_memory_used_mb,omitempty"` + MetricDiskMb int64 `protobuf:"varint,103,opt,name=metric_disk_mb,json=metricDiskMb,proto3" json:"metric_disk_mb,omitempty"` + MetricSandboxesRunning int64 `protobuf:"varint,104,opt,name=metric_sandboxes_running,json=metricSandboxesRunning,proto3" json:"metric_sandboxes_running,omitempty"` +} + +func (x *ServiceInfoResponse) Reset() { + *x = ServiceInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceInfoResponse) ProtoMessage() {} + +func (x *ServiceInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceInfoResponse.ProtoReflect.Descriptor instead. 
+func (*ServiceInfoResponse) Descriptor() ([]byte, []int) { + return file_info_proto_rawDescGZIP(), []int{0} +} + +func (x *ServiceInfoResponse) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *ServiceInfoResponse) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *ServiceInfoResponse) GetServiceVersion() string { + if x != nil { + return x.ServiceVersion + } + return "" +} + +func (x *ServiceInfoResponse) GetServiceCommit() string { + if x != nil { + return x.ServiceCommit + } + return "" +} + +func (x *ServiceInfoResponse) GetServiceStatus() ServiceInfoStatus { + if x != nil { + return x.ServiceStatus + } + return ServiceInfoStatus_OrchestratorHealthy +} + +func (x *ServiceInfoResponse) GetServiceRoles() []ServiceInfoRole { + if x != nil { + return x.ServiceRoles + } + return nil +} + +func (x *ServiceInfoResponse) GetServiceStartup() *timestamppb.Timestamp { + if x != nil { + return x.ServiceStartup + } + return nil +} + +func (x *ServiceInfoResponse) GetMetricVcpuUsed() int64 { + if x != nil { + return x.MetricVcpuUsed + } + return 0 +} + +func (x *ServiceInfoResponse) GetMetricMemoryUsedMb() int64 { + if x != nil { + return x.MetricMemoryUsedMb + } + return 0 +} + +func (x *ServiceInfoResponse) GetMetricDiskMb() int64 { + if x != nil { + return x.MetricDiskMb + } + return 0 +} + +func (x *ServiceInfoResponse) GetMetricSandboxesRunning() int64 { + if x != nil { + return x.MetricSandboxesRunning + } + return 0 +} + +type ServiceStatusChangeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceStatus ServiceInfoStatus `protobuf:"varint,2,opt,name=service_status,json=serviceStatus,proto3,enum=ServiceInfoStatus" json:"service_status,omitempty"` +} + +func (x *ServiceStatusChangeRequest) Reset() { + *x = ServiceStatusChangeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_info_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceStatusChangeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceStatusChangeRequest) ProtoMessage() {} + +func (x *ServiceStatusChangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceStatusChangeRequest.ProtoReflect.Descriptor instead. +func (*ServiceStatusChangeRequest) Descriptor() ([]byte, []int) { + return file_info_proto_rawDescGZIP(), []int{1} +} + +func (x *ServiceStatusChangeRequest) GetServiceStatus() ServiceInfoStatus { + if x != nil { + return x.ServiceStatus + } + return ServiceInfoStatus_OrchestratorHealthy +} + +var File_info_proto protoreflect.FileDescriptor + +var file_info_proto_rawDesc = []byte{ + 0x0a, 0x0a, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91, 0x04, 0x0a, 0x13, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 
0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x39, 0x0a, 0x0e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x33, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x18, + 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, + 0x70, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, 0x63, 0x70, 0x75, + 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x65, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x56, 0x63, 0x70, 0x75, 0x55, 0x73, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x15, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x73, 0x65, + 0x64, 0x5f, 0x6d, 0x62, 
0x18, 0x66, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x73, 0x65, 0x64, 0x4d, 0x62, 0x12, 0x24, + 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6d, 0x62, + 0x18, 0x67, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x69, + 0x73, 0x6b, 0x4d, 0x62, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x18, 0x68, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x57, + 0x0a, 0x1a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x0e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x61, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x17, 0x0a, 0x13, + 0x4f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x79, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x4f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x6f, 0x72, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x12, + 0x19, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x55, + 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x10, 0x02, 0x2a, 0x38, 0x0a, 0x0f, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 
0x6f, 0x6c, 0x65, 0x12, 0x13, 0x0a, + 0x0f, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x10, 0x01, 0x32, 0x98, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4c, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x1b, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, + 0x2f, 0x5a, 0x2d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x32, 0x62, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, + 0x66, 0x72, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_info_proto_rawDescOnce sync.Once + file_info_proto_rawDescData = file_info_proto_rawDesc +) + +func file_info_proto_rawDescGZIP() []byte { + file_info_proto_rawDescOnce.Do(func() { + file_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_info_proto_rawDescData) + }) + return file_info_proto_rawDescData +} + +var file_info_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_info_proto_msgTypes = 
make([]protoimpl.MessageInfo, 2) +var file_info_proto_goTypes = []interface{}{ + (ServiceInfoStatus)(0), // 0: ServiceInfoStatus + (ServiceInfoRole)(0), // 1: ServiceInfoRole + (*ServiceInfoResponse)(nil), // 2: ServiceInfoResponse + (*ServiceStatusChangeRequest)(nil), // 3: ServiceStatusChangeRequest + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 5: google.protobuf.Empty +} +var file_info_proto_depIdxs = []int32{ + 0, // 0: ServiceInfoResponse.service_status:type_name -> ServiceInfoStatus + 1, // 1: ServiceInfoResponse.service_roles:type_name -> ServiceInfoRole + 4, // 2: ServiceInfoResponse.service_startup:type_name -> google.protobuf.Timestamp + 0, // 3: ServiceStatusChangeRequest.service_status:type_name -> ServiceInfoStatus + 5, // 4: InfoService.ServiceInfo:input_type -> google.protobuf.Empty + 3, // 5: InfoService.ServiceStatusOverride:input_type -> ServiceStatusChangeRequest + 2, // 6: InfoService.ServiceInfo:output_type -> ServiceInfoResponse + 5, // 7: InfoService.ServiceStatusOverride:output_type -> google.protobuf.Empty + 6, // [6:8] is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_info_proto_init() } +func file_info_proto_init() { + if File_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceStatusChangeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_info_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_info_proto_goTypes, + DependencyIndexes: file_info_proto_depIdxs, + EnumInfos: file_info_proto_enumTypes, + MessageInfos: file_info_proto_msgTypes, + }.Build() + File_info_proto = out.File + file_info_proto_rawDesc = nil + file_info_proto_goTypes = nil + file_info_proto_depIdxs = nil +} diff --git a/packages/shared/pkg/grpc/orchestrator-info/info_grpc.pb.go b/packages/shared/pkg/grpc/orchestrator-info/info_grpc.pb.go new file mode 100644 index 0000000..890df5e --- /dev/null +++ b/packages/shared/pkg/grpc/orchestrator-info/info_grpc.pb.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v5.29.3 +// source: info.proto + +package orchestrator + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// InfoServiceClient is the client API for InfoService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type InfoServiceClient interface { + ServiceInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServiceInfoResponse, error) + ServiceStatusOverride(ctx context.Context, in *ServiceStatusChangeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type infoServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewInfoServiceClient(cc grpc.ClientConnInterface) InfoServiceClient { + return &infoServiceClient{cc} +} + +func (c *infoServiceClient) ServiceInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServiceInfoResponse, error) { + out := new(ServiceInfoResponse) + err := c.cc.Invoke(ctx, "/InfoService/ServiceInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *infoServiceClient) ServiceStatusOverride(ctx context.Context, in *ServiceStatusChangeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/InfoService/ServiceStatusOverride", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// InfoServiceServer is the server API for InfoService service. +// All implementations must embed UnimplementedInfoServiceServer +// for forward compatibility +type InfoServiceServer interface { + ServiceInfo(context.Context, *emptypb.Empty) (*ServiceInfoResponse, error) + ServiceStatusOverride(context.Context, *ServiceStatusChangeRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedInfoServiceServer() +} + +// UnimplementedInfoServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedInfoServiceServer struct { +} + +func (UnimplementedInfoServiceServer) ServiceInfo(context.Context, *emptypb.Empty) (*ServiceInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ServiceInfo not implemented") +} +func (UnimplementedInfoServiceServer) ServiceStatusOverride(context.Context, *ServiceStatusChangeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ServiceStatusOverride not implemented") +} +func (UnimplementedInfoServiceServer) mustEmbedUnimplementedInfoServiceServer() {} + +// UnsafeInfoServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to InfoServiceServer will +// result in compilation errors. +type UnsafeInfoServiceServer interface { + mustEmbedUnimplementedInfoServiceServer() +} + +func RegisterInfoServiceServer(s grpc.ServiceRegistrar, srv InfoServiceServer) { + s.RegisterService(&InfoService_ServiceDesc, srv) +} + +func _InfoService_ServiceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InfoServiceServer).ServiceInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/InfoService/ServiceInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InfoServiceServer).ServiceInfo(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _InfoService_ServiceStatusOverride_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ServiceStatusChangeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(InfoServiceServer).ServiceStatusOverride(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/InfoService/ServiceStatusOverride", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InfoServiceServer).ServiceStatusOverride(ctx, req.(*ServiceStatusChangeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// InfoService_ServiceDesc is the grpc.ServiceDesc for InfoService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var InfoService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "InfoService", + HandlerType: (*InfoServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ServiceInfo", + Handler: _InfoService_ServiceInfo_Handler, + }, + { + MethodName: "ServiceStatusOverride", + Handler: _InfoService_ServiceStatusOverride_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "info.proto", +} diff --git a/packages/shared/pkg/grpc/orchestrator/orchestrator.pb.go b/packages/shared/pkg/grpc/orchestrator/orchestrator.pb.go index 75d8576..ca94736 100644 --- a/packages/shared/pkg/grpc/orchestrator/orchestrator.pb.go +++ b/packages/shared/pkg/grpc/orchestrator/orchestrator.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 -// protoc v3.19.6 -// source: packages/orchestrator/orchestrator.proto +// protoc-gen-go v1.28.1 +// protoc v5.29.3 +// source: orchestrator.proto package orchestrator @@ -13,7 +13,6 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -24,7 +23,10 @@ const ( ) type SandboxConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data required for creating a new sandbox. 
TemplateId string `protobuf:"bytes,1,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` @@ -32,29 +34,31 @@ type SandboxConfig struct { FirecrackerVersion string `protobuf:"bytes,4,opt,name=firecracker_version,json=firecrackerVersion,proto3" json:"firecracker_version,omitempty"` HugePages bool `protobuf:"varint,5,opt,name=huge_pages,json=hugePages,proto3" json:"huge_pages,omitempty"` SandboxId string `protobuf:"bytes,6,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - EnvVars map[string]string `protobuf:"bytes,7,rep,name=env_vars,json=envVars,proto3" json:"env_vars,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + EnvVars map[string]string `protobuf:"bytes,7,rep,name=env_vars,json=envVars,proto3" json:"env_vars,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Metadata about the sandbox. - Metadata map[string]string `protobuf:"bytes,8,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Metadata map[string]string `protobuf:"bytes,8,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Alias *string `protobuf:"bytes,9,opt,name=alias,proto3,oneof" json:"alias,omitempty"` EnvdVersion string `protobuf:"bytes,10,opt,name=envd_version,json=envdVersion,proto3" json:"envd_version,omitempty"` Vcpu int64 `protobuf:"varint,11,opt,name=vcpu,proto3" json:"vcpu,omitempty"` RamMb int64 `protobuf:"varint,12,opt,name=ram_mb,json=ramMb,proto3" json:"ram_mb,omitempty"` TeamId string `protobuf:"bytes,13,opt,name=team_id,json=teamId,proto3" json:"team_id,omitempty"` // Maximum length of the sandbox in Hours. 
- MaxSandboxLength int64 `protobuf:"varint,14,opt,name=max_sandbox_length,json=maxSandboxLength,proto3" json:"max_sandbox_length,omitempty"` - TotalDiskSizeMb int64 `protobuf:"varint,15,opt,name=total_disk_size_mb,json=totalDiskSizeMb,proto3" json:"total_disk_size_mb,omitempty"` - Snapshot bool `protobuf:"varint,16,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - BaseTemplateId string `protobuf:"bytes,17,opt,name=base_template_id,json=baseTemplateId,proto3" json:"base_template_id,omitempty"` - AutoPause *bool `protobuf:"varint,18,opt,name=auto_pause,json=autoPause,proto3,oneof" json:"auto_pause,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + MaxSandboxLength int64 `protobuf:"varint,14,opt,name=max_sandbox_length,json=maxSandboxLength,proto3" json:"max_sandbox_length,omitempty"` + TotalDiskSizeMb int64 `protobuf:"varint,15,opt,name=total_disk_size_mb,json=totalDiskSizeMb,proto3" json:"total_disk_size_mb,omitempty"` + Snapshot bool `protobuf:"varint,16,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + BaseTemplateId string `protobuf:"bytes,17,opt,name=base_template_id,json=baseTemplateId,proto3" json:"base_template_id,omitempty"` + AutoPause *bool `protobuf:"varint,18,opt,name=auto_pause,json=autoPause,proto3,oneof" json:"auto_pause,omitempty"` + EnvdAccessToken *string `protobuf:"bytes,19,opt,name=envd_access_token,json=envdAccessToken,proto3,oneof" json:"envd_access_token,omitempty"` + ExecutionId string `protobuf:"bytes,20,opt,name=execution_id,json=executionId,proto3" json:"execution_id,omitempty"` } func (x *SandboxConfig) Reset() { *x = SandboxConfig{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxConfig) String() string { @@ -64,8 
+68,8 @@ func (x *SandboxConfig) String() string { func (*SandboxConfig) ProtoMessage() {} func (x *SandboxConfig) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[0] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,7 +81,7 @@ func (x *SandboxConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxConfig.ProtoReflect.Descriptor instead. func (*SandboxConfig) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{0} + return file_orchestrator_proto_rawDescGZIP(), []int{0} } func (x *SandboxConfig) GetTemplateId() string { @@ -206,20 +210,37 @@ func (x *SandboxConfig) GetAutoPause() bool { return false } +func (x *SandboxConfig) GetEnvdAccessToken() string { + if x != nil && x.EnvdAccessToken != nil { + return *x.EnvdAccessToken + } + return "" +} + +func (x *SandboxConfig) GetExecutionId() string { + if x != nil { + return x.ExecutionId + } + return "" +} + type SandboxCreateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Sandbox *SandboxConfig `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` - StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *SandboxConfig `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime *timestamppb.Timestamp 
`protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *SandboxCreateRequest) Reset() { *x = SandboxCreateRequest{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxCreateRequest) String() string { @@ -229,8 +250,8 @@ func (x *SandboxCreateRequest) String() string { func (*SandboxCreateRequest) ProtoMessage() {} func (x *SandboxCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[1] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -242,7 +263,7 @@ func (x *SandboxCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxCreateRequest.ProtoReflect.Descriptor instead. 
func (*SandboxCreateRequest) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{1} + return file_orchestrator_proto_rawDescGZIP(), []int{1} } func (x *SandboxCreateRequest) GetSandbox() *SandboxConfig { @@ -267,17 +288,20 @@ func (x *SandboxCreateRequest) GetEndTime() *timestamppb.Timestamp { } type SandboxCreateResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` } func (x *SandboxCreateResponse) Reset() { *x = SandboxCreateResponse{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxCreateResponse) String() string { @@ -287,8 +311,8 @@ func (x *SandboxCreateResponse) String() string { func (*SandboxCreateResponse) ProtoMessage() {} func (x *SandboxCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[2] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -300,7 +324,7 @@ func (x *SandboxCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxCreateResponse.ProtoReflect.Descriptor instead. 
func (*SandboxCreateResponse) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{2} + return file_orchestrator_proto_rawDescGZIP(), []int{2} } func (x *SandboxCreateResponse) GetClientId() string { @@ -311,18 +335,21 @@ func (x *SandboxCreateResponse) GetClientId() string { } type SandboxUpdateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *SandboxUpdateRequest) Reset() { *x = SandboxUpdateRequest{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxUpdateRequest) String() string { @@ -332,8 +359,8 @@ func (x *SandboxUpdateRequest) String() string { func (*SandboxUpdateRequest) ProtoMessage() {} func (x *SandboxUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[3] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -345,7 +372,7 @@ func (x *SandboxUpdateRequest) ProtoReflect() 
protoreflect.Message { // Deprecated: Use SandboxUpdateRequest.ProtoReflect.Descriptor instead. func (*SandboxUpdateRequest) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{3} + return file_orchestrator_proto_rawDescGZIP(), []int{3} } func (x *SandboxUpdateRequest) GetSandboxId() string { @@ -363,17 +390,20 @@ func (x *SandboxUpdateRequest) GetEndTime() *timestamppb.Timestamp { } type SandboxDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` } func (x *SandboxDeleteRequest) Reset() { *x = SandboxDeleteRequest{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxDeleteRequest) String() string { @@ -383,8 +413,8 @@ func (x *SandboxDeleteRequest) String() string { func (*SandboxDeleteRequest) ProtoMessage() {} func (x *SandboxDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[4] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -396,7 +426,7 @@ func (x *SandboxDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxDeleteRequest.ProtoReflect.Descriptor instead. 
func (*SandboxDeleteRequest) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{4} + return file_orchestrator_proto_rawDescGZIP(), []int{4} } func (x *SandboxDeleteRequest) GetSandboxId() string { @@ -407,19 +437,22 @@ func (x *SandboxDeleteRequest) GetSandboxId() string { } type SandboxPauseRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - TemplateId string `protobuf:"bytes,2,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` - BuildId string `protobuf:"bytes,3,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + TemplateId string `protobuf:"bytes,2,opt,name=template_id,json=templateId,proto3" json:"template_id,omitempty"` + BuildId string `protobuf:"bytes,3,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` } func (x *SandboxPauseRequest) Reset() { *x = SandboxPauseRequest{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxPauseRequest) String() string { @@ -429,8 +462,8 @@ func (x *SandboxPauseRequest) String() string { func (*SandboxPauseRequest) ProtoMessage() {} func (x *SandboxPauseRequest) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[5] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -442,7 +475,7 @@ func (x *SandboxPauseRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxPauseRequest.ProtoReflect.Descriptor instead. func (*SandboxPauseRequest) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{5} + return file_orchestrator_proto_rawDescGZIP(), []int{5} } func (x *SandboxPauseRequest) GetSandboxId() string { @@ -467,20 +500,23 @@ func (x *SandboxPauseRequest) GetBuildId() string { } type RunningSandbox struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *SandboxConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` - StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *SandboxConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + ClientId string `protobuf:"bytes,2,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *RunningSandbox) Reset() { *x = RunningSandbox{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[6] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *RunningSandbox) String() string { @@ -490,8 +526,8 @@ func (x *RunningSandbox) String() string { func (*RunningSandbox) ProtoMessage() {} func (x *RunningSandbox) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[6] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -503,7 +539,7 @@ func (x *RunningSandbox) ProtoReflect() protoreflect.Message { // Deprecated: Use RunningSandbox.ProtoReflect.Descriptor instead. func (*RunningSandbox) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{6} + return file_orchestrator_proto_rawDescGZIP(), []int{6} } func (x *RunningSandbox) GetConfig() *SandboxConfig { @@ -535,17 +571,20 @@ func (x *RunningSandbox) GetEndTime() *timestamppb.Timestamp { } type SandboxListResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Sandboxes []*RunningSandbox `protobuf:"bytes,1,rep,name=sandboxes,proto3" json:"sandboxes,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandboxes []*RunningSandbox `protobuf:"bytes,1,rep,name=sandboxes,proto3" json:"sandboxes,omitempty"` } func (x *SandboxListResponse) Reset() { *x = SandboxListResponse{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxListResponse) String() string { @@ -555,8 +594,8 @@ func (x *SandboxListResponse) String() string { func 
(*SandboxListResponse) ProtoMessage() {} func (x *SandboxListResponse) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[7] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -568,7 +607,7 @@ func (x *SandboxListResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxListResponse.ProtoReflect.Descriptor instead. func (*SandboxListResponse) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{7} + return file_orchestrator_proto_rawDescGZIP(), []int{7} } func (x *SandboxListResponse) GetSandboxes() []*RunningSandbox { @@ -579,18 +618,21 @@ func (x *SandboxListResponse) GetSandboxes() []*RunningSandbox { } type CachedBuildInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + BuildId string `protobuf:"bytes,1,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache } func (x *CachedBuildInfo) Reset() { *x = CachedBuildInfo{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *CachedBuildInfo) String() string { @@ -600,8 +642,8 @@ func (x *CachedBuildInfo) String() string { func (*CachedBuildInfo) ProtoMessage() {} func (x *CachedBuildInfo) ProtoReflect() protoreflect.Message { - 
mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[8] - if x != nil { + mi := &file_orchestrator_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -613,7 +655,7 @@ func (x *CachedBuildInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use CachedBuildInfo.ProtoReflect.Descriptor instead. func (*CachedBuildInfo) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{8} + return file_orchestrator_proto_rawDescGZIP(), []int{8} } func (x *CachedBuildInfo) GetBuildId() string { @@ -631,17 +673,20 @@ func (x *CachedBuildInfo) GetExpirationTime() *timestamppb.Timestamp { } type SandboxListCachedBuildsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Builds []*CachedBuildInfo `protobuf:"bytes,1,rep,name=builds,proto3" json:"builds,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Builds []*CachedBuildInfo `protobuf:"bytes,1,rep,name=builds,proto3" json:"builds,omitempty"` } func (x *SandboxListCachedBuildsResponse) Reset() { *x = SandboxListCachedBuildsResponse{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_orchestrator_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *SandboxListCachedBuildsResponse) String() string { @@ -651,8 +696,8 @@ func (x *SandboxListCachedBuildsResponse) String() string { func (*SandboxListCachedBuildsResponse) ProtoMessage() {} func (x *SandboxListCachedBuildsResponse) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[9] - if x != nil { + mi := 
&file_orchestrator_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -664,7 +709,7 @@ func (x *SandboxListCachedBuildsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SandboxListCachedBuildsResponse.ProtoReflect.Descriptor instead. func (*SandboxListCachedBuildsResponse) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{9} + return file_orchestrator_proto_rawDescGZIP(), []int{9} } func (x *SandboxListCachedBuildsResponse) GetBuilds() []*CachedBuildInfo { @@ -674,331 +719,176 @@ func (x *SandboxListCachedBuildsResponse) GetBuilds() []*CachedBuildInfo { return nil } -type SandboxCmdRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SandboxId string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` - Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` - Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` - Env map[string]string `protobuf:"bytes,4,rep,name=env,proto3" json:"env,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - WorkingDir string `protobuf:"bytes,5,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SandboxCmdRequest) Reset() { - *x = SandboxCmdRequest{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SandboxCmdRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SandboxCmdRequest) ProtoMessage() {} - -func (x *SandboxCmdRequest) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[10] - if x != nil { - 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SandboxCmdRequest.ProtoReflect.Descriptor instead. -func (*SandboxCmdRequest) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{10} -} - -func (x *SandboxCmdRequest) GetSandboxId() string { - if x != nil { - return x.SandboxId - } - return "" -} - -func (x *SandboxCmdRequest) GetCommand() string { - if x != nil { - return x.Command - } - return "" -} - -func (x *SandboxCmdRequest) GetArgs() []string { - if x != nil { - return x.Args - } - return nil -} - -func (x *SandboxCmdRequest) GetEnv() map[string]string { - if x != nil { - return x.Env - } - return nil -} - -func (x *SandboxCmdRequest) GetWorkingDir() string { - if x != nil { - return x.WorkingDir - } - return "" -} - -type SandboxCmdResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` - ExitCode int32 `protobuf:"varint,2,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SandboxCmdResponse) Reset() { - *x = SandboxCmdResponse{} - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SandboxCmdResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SandboxCmdResponse) ProtoMessage() {} - -func (x *SandboxCmdResponse) ProtoReflect() protoreflect.Message { - mi := &file_packages_orchestrator_orchestrator_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } 
- return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SandboxCmdResponse.ProtoReflect.Descriptor instead. -func (*SandboxCmdResponse) Descriptor() ([]byte, []int) { - return file_packages_orchestrator_orchestrator_proto_rawDescGZIP(), []int{11} -} - -func (x *SandboxCmdResponse) GetOutput() string { - if x != nil { - return x.Output - } - return "" -} - -func (x *SandboxCmdResponse) GetExitCode() int32 { - if x != nil { - return x.ExitCode - } - return 0 -} - -func (x *SandboxCmdResponse) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -var File_packages_orchestrator_orchestrator_proto protoreflect.FileDescriptor - -var file_packages_orchestrator_orchestrator_proto_rawDesc = string([]byte{ - 0x0a, 0x28, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, - 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac, 0x06, 0x0a, 0x0d, 0x53, 0x61, 0x6e, - 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0d, - 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, - 0x13, 0x66, 0x69, 0x72, 0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x66, 0x69, 0x72, 0x65, - 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, - 0x0a, 0x0a, 0x68, 0x75, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x68, 0x75, 0x67, 0x65, 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1d, 0x0a, - 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, - 0x65, 0x6e, 0x76, 0x5f, 0x76, 0x61, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, - 0x6e, 0x76, 0x56, 0x61, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x76, - 0x56, 0x61, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, - 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x76, - 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x65, 0x6e, 0x76, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, - 0x76, 0x63, 0x70, 0x75, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x76, 0x63, 0x70, 0x75, - 0x12, 0x15, 0x0a, 0x06, 
0x72, 0x61, 0x6d, 0x5f, 0x6d, 0x62, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x05, 0x72, 0x61, 0x6d, 0x4d, 0x62, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x65, 0x61, 0x6d, 0x5f, - 0x69, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x61, 0x6d, 0x49, 0x64, - 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x61, - 0x78, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2b, - 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, - 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, - 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x75, 0x73, 0x65, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x61, 0x75, - 0x73, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x6e, 0x76, 0x56, 0x61, 0x72, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, - 0x0a, 0x06, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, 0x75, 0x74, - 0x6f, 0x5f, 0x70, 0x61, 0x75, 0x73, 0x65, 0x22, 0xb2, 0x01, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, - 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x28, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x34, 0x0a, 0x15, - 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x22, 0x6c, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, - 
0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, +var File_orchestrator_proto protoreflect.FileDescriptor + +var file_orchestrator_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x96, 0x07, 0x0a, 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, + 0x12, 0x25, 0x0a, 0x0e, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x66, 0x69, 0x72, 0x65, 0x63, + 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x66, 0x69, 0x72, 0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, + 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x75, 0x67, 0x65, + 0x5f, 0x70, 0x61, 0x67, 0x65, 
0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x75, + 0x67, 0x65, 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x5f, 0x76, 0x61, + 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x76, 0x56, 0x61, 0x72, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x76, 0x56, 0x61, 0x72, 0x73, 0x12, 0x38, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x76, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x64, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x76, 0x63, 0x70, 0x75, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x76, 0x63, 0x70, 0x75, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x61, + 0x6d, 0x5f, 0x6d, 0x62, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x61, 0x6d, 0x4d, + 0x62, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x61, + 0x78, 0x5f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6d, 0x61, 0x78, 
0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6d, 0x62, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x53, + 0x69, 0x7a, 0x65, 0x4d, 0x62, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x61, + 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x75, 0x73, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x01, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x61, 0x75, 0x73, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x2f, 0x0a, 0x11, 0x65, 0x6e, 0x76, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x0f, 0x65, 0x6e, + 0x76, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x6e, 0x76, 0x56, 0x61, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 
0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, 0x0a, 0x06, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x70, 0x61, 0x75, 0x73, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x65, 0x6e, 0x76, 0x64, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x01, 0x0a, 0x14, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x39, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, - 0x22, 0x35, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, - 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 
0x70, 0x0a, 0x13, 0x53, 0x61, 0x6e, 0x64, 0x62, - 0x6f, 0x78, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x22, 0x34, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x6c, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x64, 0x12, 0x19, - 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x0e, 0x52, 0x75, - 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x26, 0x0a, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x53, - 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, - 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, - 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x22, 0x44, 0x0a, 0x13, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, - 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x73, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x09, - 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x22, 0x71, 0x0a, 0x0f, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x08, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x4b, 0x0a, 0x1f, - 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x28, 0x0a, 0x06, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x06, 0x62, 0x75, 
0x69, 0x6c, 0x64, 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x53, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6d, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, - 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x2d, 0x0a, 0x03, - 0x65, 0x6e, 0x76, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x53, 0x61, 0x6e, 0x64, - 0x62, 0x6f, 0x78, 0x43, 0x6d, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x6e, - 0x76, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1f, 0x0a, 0x0b, 0x77, - 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x72, 0x1a, 0x36, 0x0a, 0x08, - 0x45, 0x6e, 0x76, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5f, 0x0a, 0x12, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, - 0x6d, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0xa6, 0x03, 0x0a, 0x0e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, - 0x78, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x53, 0x61, 0x6e, 0x64, - 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x37, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x04, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x53, 0x61, 0x6e, - 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, 0x6e, - 0x64, 0x62, 0x6f, 0x78, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x50, 0x61, 0x75, - 0x73, 0x65, 0x12, 0x14, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x50, 0x61, 0x75, 0x73, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x2e, 0x0a, 0x03, 0x43, 0x6d, 0x64, 0x12, 0x12, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, - 0x78, 
0x43, 0x6d, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x53, 0x61, - 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6d, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x4c, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x53, - 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2f, - 0x5a, 0x2d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x32, 0x62, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, - 0x72, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x35, 0x0a, + 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x70, 0x0a, 0x13, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, + 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x22, 0xc7, 0x01, + 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x12, 0x26, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, + 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x44, 0x0a, 0x13, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x52, 0x75, 
0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x22, 0x71, 0x0a, + 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x19, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0f, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x4b, 0x0a, 0x1f, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x32, 0xf6, 0x02, + 0x0a, 0x0e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x37, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x06, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 
0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x34, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x14, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x15, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x35, 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x14, 0x2e, 0x53, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4c, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x12, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2f, 0x5a, 0x2d, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x32, 0x62, + 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x68, 0x65, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} var ( - 
file_packages_orchestrator_orchestrator_proto_rawDescOnce sync.Once - file_packages_orchestrator_orchestrator_proto_rawDescData []byte + file_orchestrator_proto_rawDescOnce sync.Once + file_orchestrator_proto_rawDescData = file_orchestrator_proto_rawDesc ) -func file_packages_orchestrator_orchestrator_proto_rawDescGZIP() []byte { - file_packages_orchestrator_orchestrator_proto_rawDescOnce.Do(func() { - file_packages_orchestrator_orchestrator_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_packages_orchestrator_orchestrator_proto_rawDesc), len(file_packages_orchestrator_orchestrator_proto_rawDesc))) +func file_orchestrator_proto_rawDescGZIP() []byte { + file_orchestrator_proto_rawDescOnce.Do(func() { + file_orchestrator_proto_rawDescData = protoimpl.X.CompressGZIP(file_orchestrator_proto_rawDescData) }) - return file_packages_orchestrator_orchestrator_proto_rawDescData + return file_orchestrator_proto_rawDescData } -var file_packages_orchestrator_orchestrator_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_packages_orchestrator_orchestrator_proto_goTypes = []any{ +var file_orchestrator_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_orchestrator_proto_goTypes = []interface{}{ (*SandboxConfig)(nil), // 0: SandboxConfig (*SandboxCreateRequest)(nil), // 1: SandboxCreateRequest (*SandboxCreateResponse)(nil), // 2: SandboxCreateResponse @@ -1009,70 +899,187 @@ var file_packages_orchestrator_orchestrator_proto_goTypes = []any{ (*SandboxListResponse)(nil), // 7: SandboxListResponse (*CachedBuildInfo)(nil), // 8: CachedBuildInfo (*SandboxListCachedBuildsResponse)(nil), // 9: SandboxListCachedBuildsResponse - (*SandboxCmdRequest)(nil), // 10: SandboxCmdRequest - (*SandboxCmdResponse)(nil), // 11: SandboxCmdResponse - nil, // 12: SandboxConfig.EnvVarsEntry - nil, // 13: SandboxConfig.MetadataEntry - nil, // 14: SandboxCmdRequest.EnvEntry - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - 
(*emptypb.Empty)(nil), // 16: google.protobuf.Empty -} -var file_packages_orchestrator_orchestrator_proto_depIdxs = []int32{ - 12, // 0: SandboxConfig.env_vars:type_name -> SandboxConfig.EnvVarsEntry - 13, // 1: SandboxConfig.metadata:type_name -> SandboxConfig.MetadataEntry + nil, // 10: SandboxConfig.EnvVarsEntry + nil, // 11: SandboxConfig.MetadataEntry + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 13: google.protobuf.Empty +} +var file_orchestrator_proto_depIdxs = []int32{ + 10, // 0: SandboxConfig.env_vars:type_name -> SandboxConfig.EnvVarsEntry + 11, // 1: SandboxConfig.metadata:type_name -> SandboxConfig.MetadataEntry 0, // 2: SandboxCreateRequest.sandbox:type_name -> SandboxConfig - 15, // 3: SandboxCreateRequest.start_time:type_name -> google.protobuf.Timestamp - 15, // 4: SandboxCreateRequest.end_time:type_name -> google.protobuf.Timestamp - 15, // 5: SandboxUpdateRequest.end_time:type_name -> google.protobuf.Timestamp + 12, // 3: SandboxCreateRequest.start_time:type_name -> google.protobuf.Timestamp + 12, // 4: SandboxCreateRequest.end_time:type_name -> google.protobuf.Timestamp + 12, // 5: SandboxUpdateRequest.end_time:type_name -> google.protobuf.Timestamp 0, // 6: RunningSandbox.config:type_name -> SandboxConfig - 15, // 7: RunningSandbox.start_time:type_name -> google.protobuf.Timestamp - 15, // 8: RunningSandbox.end_time:type_name -> google.protobuf.Timestamp + 12, // 7: RunningSandbox.start_time:type_name -> google.protobuf.Timestamp + 12, // 8: RunningSandbox.end_time:type_name -> google.protobuf.Timestamp 6, // 9: SandboxListResponse.sandboxes:type_name -> RunningSandbox - 15, // 10: CachedBuildInfo.expiration_time:type_name -> google.protobuf.Timestamp + 12, // 10: CachedBuildInfo.expiration_time:type_name -> google.protobuf.Timestamp 8, // 11: SandboxListCachedBuildsResponse.builds:type_name -> CachedBuildInfo - 14, // 12: SandboxCmdRequest.env:type_name -> SandboxCmdRequest.EnvEntry - 1, // 13: 
SandboxService.Create:input_type -> SandboxCreateRequest - 3, // 14: SandboxService.Update:input_type -> SandboxUpdateRequest - 16, // 15: SandboxService.List:input_type -> google.protobuf.Empty - 4, // 16: SandboxService.Delete:input_type -> SandboxDeleteRequest - 5, // 17: SandboxService.Pause:input_type -> SandboxPauseRequest - 10, // 18: SandboxService.Cmd:input_type -> SandboxCmdRequest - 16, // 19: SandboxService.ListCachedBuilds:input_type -> google.protobuf.Empty - 2, // 20: SandboxService.Create:output_type -> SandboxCreateResponse - 16, // 21: SandboxService.Update:output_type -> google.protobuf.Empty - 7, // 22: SandboxService.List:output_type -> SandboxListResponse - 16, // 23: SandboxService.Delete:output_type -> google.protobuf.Empty - 16, // 24: SandboxService.Pause:output_type -> google.protobuf.Empty - 11, // 25: SandboxService.Cmd:output_type -> SandboxCmdResponse - 9, // 26: SandboxService.ListCachedBuilds:output_type -> SandboxListCachedBuildsResponse - 20, // [20:27] is the sub-list for method output_type - 13, // [13:20] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name -} - -func init() { file_packages_orchestrator_orchestrator_proto_init() } -func file_packages_orchestrator_orchestrator_proto_init() { - if File_packages_orchestrator_orchestrator_proto != nil { + 1, // 12: SandboxService.Create:input_type -> SandboxCreateRequest + 3, // 13: SandboxService.Update:input_type -> SandboxUpdateRequest + 13, // 14: SandboxService.List:input_type -> google.protobuf.Empty + 4, // 15: SandboxService.Delete:input_type -> SandboxDeleteRequest + 5, // 16: SandboxService.Pause:input_type -> SandboxPauseRequest + 13, // 17: SandboxService.ListCachedBuilds:input_type -> google.protobuf.Empty + 2, // 18: SandboxService.Create:output_type -> SandboxCreateResponse + 13, // 19: SandboxService.Update:output_type 
-> google.protobuf.Empty + 7, // 20: SandboxService.List:output_type -> SandboxListResponse + 13, // 21: SandboxService.Delete:output_type -> google.protobuf.Empty + 13, // 22: SandboxService.Pause:output_type -> google.protobuf.Empty + 9, // 23: SandboxService.ListCachedBuilds:output_type -> SandboxListCachedBuildsResponse + 18, // [18:24] is the sub-list for method output_type + 12, // [12:18] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_orchestrator_proto_init() } +func file_orchestrator_proto_init() { + if File_orchestrator_proto != nil { return } - file_packages_orchestrator_orchestrator_proto_msgTypes[0].OneofWrappers = []any{} + if !protoimpl.UnsafeEnabled { + file_orchestrator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxCreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*SandboxDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxPauseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunningSandbox); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CachedBuildInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_orchestrator_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxListCachedBuildsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_orchestrator_proto_msgTypes[0].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_packages_orchestrator_orchestrator_proto_rawDesc), len(file_packages_orchestrator_orchestrator_proto_rawDesc)), + RawDescriptor: file_orchestrator_proto_rawDesc, NumEnums: 0, - NumMessages: 15, + NumMessages: 12, NumExtensions: 0, NumServices: 1, }, - GoTypes: 
file_packages_orchestrator_orchestrator_proto_goTypes, - DependencyIndexes: file_packages_orchestrator_orchestrator_proto_depIdxs, - MessageInfos: file_packages_orchestrator_orchestrator_proto_msgTypes, + GoTypes: file_orchestrator_proto_goTypes, + DependencyIndexes: file_orchestrator_proto_depIdxs, + MessageInfos: file_orchestrator_proto_msgTypes, }.Build() - File_packages_orchestrator_orchestrator_proto = out.File - file_packages_orchestrator_orchestrator_proto_goTypes = nil - file_packages_orchestrator_orchestrator_proto_depIdxs = nil + File_orchestrator_proto = out.File + file_orchestrator_proto_rawDesc = nil + file_orchestrator_proto_goTypes = nil + file_orchestrator_proto_depIdxs = nil } diff --git a/packages/shared/pkg/grpc/orchestrator/packages/orchestrator/orchestrator_grpc.pb.go b/packages/shared/pkg/grpc/orchestrator/packages/orchestrator/orchestrator_grpc.pb.go deleted file mode 100644 index 8667701..0000000 --- a/packages/shared/pkg/grpc/orchestrator/packages/orchestrator/orchestrator_grpc.pb.go +++ /dev/null @@ -1,350 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v3.19.6 -// source: packages/orchestrator/orchestrator.proto - -package orchestrator - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. 
-const _ = grpc.SupportPackageIsVersion9 - -const ( - SandboxService_Create_FullMethodName = "/SandboxService/Create" - SandboxService_Update_FullMethodName = "/SandboxService/Update" - SandboxService_List_FullMethodName = "/SandboxService/List" - SandboxService_Delete_FullMethodName = "/SandboxService/Delete" - SandboxService_Pause_FullMethodName = "/SandboxService/Pause" - SandboxService_Cmd_FullMethodName = "/SandboxService/Cmd" - SandboxService_ListCachedBuilds_FullMethodName = "/SandboxService/ListCachedBuilds" -) - -// SandboxServiceClient is the client API for SandboxService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SandboxServiceClient interface { - Create(ctx context.Context, in *SandboxCreateRequest, opts ...grpc.CallOption) (*SandboxCreateResponse, error) - Update(ctx context.Context, in *SandboxUpdateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SandboxListResponse, error) - Delete(ctx context.Context, in *SandboxDeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - Pause(ctx context.Context, in *SandboxPauseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - Cmd(ctx context.Context, in *SandboxCmdRequest, opts ...grpc.CallOption) (*SandboxCmdResponse, error) - ListCachedBuilds(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SandboxListCachedBuildsResponse, error) -} - -type sandboxServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewSandboxServiceClient(cc grpc.ClientConnInterface) SandboxServiceClient { - return &sandboxServiceClient{cc} -} - -func (c *sandboxServiceClient) Create(ctx context.Context, in *SandboxCreateRequest, opts ...grpc.CallOption) (*SandboxCreateResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
- out := new(SandboxCreateResponse) - err := c.cc.Invoke(ctx, SandboxService_Create_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) Update(ctx context.Context, in *SandboxUpdateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, SandboxService_Update_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) List(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SandboxListResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(SandboxListResponse) - err := c.cc.Invoke(ctx, SandboxService_List_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) Delete(ctx context.Context, in *SandboxDeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, SandboxService_Delete_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) Pause(ctx context.Context, in *SandboxPauseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, SandboxService_Pause_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) Cmd(ctx context.Context, in *SandboxCmdRequest, opts ...grpc.CallOption) (*SandboxCmdResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
- out := new(SandboxCmdResponse) - err := c.cc.Invoke(ctx, SandboxService_Cmd_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sandboxServiceClient) ListCachedBuilds(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SandboxListCachedBuildsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(SandboxListCachedBuildsResponse) - err := c.cc.Invoke(ctx, SandboxService_ListCachedBuilds_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SandboxServiceServer is the server API for SandboxService service. -// All implementations must embed UnimplementedSandboxServiceServer -// for forward compatibility. -type SandboxServiceServer interface { - Create(context.Context, *SandboxCreateRequest) (*SandboxCreateResponse, error) - Update(context.Context, *SandboxUpdateRequest) (*emptypb.Empty, error) - List(context.Context, *emptypb.Empty) (*SandboxListResponse, error) - Delete(context.Context, *SandboxDeleteRequest) (*emptypb.Empty, error) - Pause(context.Context, *SandboxPauseRequest) (*emptypb.Empty, error) - Cmd(context.Context, *SandboxCmdRequest) (*SandboxCmdResponse, error) - ListCachedBuilds(context.Context, *emptypb.Empty) (*SandboxListCachedBuildsResponse, error) - mustEmbedUnimplementedSandboxServiceServer() -} - -// UnimplementedSandboxServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. 
-type UnimplementedSandboxServiceServer struct{} - -func (UnimplementedSandboxServiceServer) Create(context.Context, *SandboxCreateRequest) (*SandboxCreateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (UnimplementedSandboxServiceServer) Update(context.Context, *SandboxUpdateRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (UnimplementedSandboxServiceServer) List(context.Context, *emptypb.Empty) (*SandboxListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (UnimplementedSandboxServiceServer) Delete(context.Context, *SandboxDeleteRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (UnimplementedSandboxServiceServer) Pause(context.Context, *SandboxPauseRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented") -} -func (UnimplementedSandboxServiceServer) Cmd(context.Context, *SandboxCmdRequest) (*SandboxCmdResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Cmd not implemented") -} -func (UnimplementedSandboxServiceServer) ListCachedBuilds(context.Context, *emptypb.Empty) (*SandboxListCachedBuildsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListCachedBuilds not implemented") -} -func (UnimplementedSandboxServiceServer) mustEmbedUnimplementedSandboxServiceServer() {} -func (UnimplementedSandboxServiceServer) testEmbeddedByValue() {} - -// UnsafeSandboxServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SandboxServiceServer will -// result in compilation errors. 
-type UnsafeSandboxServiceServer interface { - mustEmbedUnimplementedSandboxServiceServer() -} - -func RegisterSandboxServiceServer(s grpc.ServiceRegistrar, srv SandboxServiceServer) { - // If the following call pancis, it indicates UnimplementedSandboxServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&SandboxService_ServiceDesc, srv) -} - -func _SandboxService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SandboxCreateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_Create_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).Create(ctx, req.(*SandboxCreateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SandboxUpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_Update_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).Update(ctx, req.(*SandboxUpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_List_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_List_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).List(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SandboxDeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_Delete_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).Delete(ctx, req.(*SandboxDeleteRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SandboxPauseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).Pause(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_Pause_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).Pause(ctx, req.(*SandboxPauseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_Cmd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SandboxCmdRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).Cmd(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_Cmd_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).Cmd(ctx, req.(*SandboxCmdRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SandboxService_ListCachedBuilds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SandboxServiceServer).ListCachedBuilds(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: SandboxService_ListCachedBuilds_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SandboxServiceServer).ListCachedBuilds(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// SandboxService_ServiceDesc is the grpc.ServiceDesc for SandboxService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SandboxService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "SandboxService", - HandlerType: (*SandboxServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _SandboxService_Create_Handler, - }, - { - MethodName: "Update", - Handler: _SandboxService_Update_Handler, - }, - { - MethodName: "List", - Handler: _SandboxService_List_Handler, - }, - { - MethodName: "Delete", - Handler: _SandboxService_Delete_Handler, - }, - { - MethodName: "Pause", - Handler: _SandboxService_Pause_Handler, - }, - { - MethodName: "Cmd", - Handler: _SandboxService_Cmd_Handler, - }, - { - MethodName: "ListCachedBuilds", - Handler: _SandboxService_ListCachedBuilds_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "packages/orchestrator/orchestrator.proto", -} diff --git a/packages/shared/pkg/grpc/template-manager/template-manager.pb.go b/packages/shared/pkg/grpc/template-manager/template-manager.pb.go index c37ee65..327851d 100644 --- a/packages/shared/pkg/grpc/template-manager/template-manager.pb.go +++ b/packages/shared/pkg/grpc/template-manager/template-manager.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.5 -// protoc v6.30.1 -// source: template-manager/template-manager.proto +// protoc-gen-go v1.28.1 +// protoc v5.29.3 +// source: template-manager.proto package template_manager @@ -12,7 +12,6 @@ import ( emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" sync "sync" - unsafe "unsafe" ) const ( @@ -22,26 +21,125 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type TemplateBuildState int32 + +const ( + TemplateBuildState_Building TemplateBuildState = 0 + TemplateBuildState_Failed TemplateBuildState = 1 + TemplateBuildState_Completed TemplateBuildState = 2 +) + +// Enum value maps for TemplateBuildState. 
+var ( + TemplateBuildState_name = map[int32]string{ + 0: "Building", + 1: "Failed", + 2: "Completed", + } + TemplateBuildState_value = map[string]int32{ + "Building": 0, + "Failed": 1, + "Completed": 2, + } +) + +func (x TemplateBuildState) Enum() *TemplateBuildState { + p := new(TemplateBuildState) + *p = x + return p +} + +func (x TemplateBuildState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TemplateBuildState) Descriptor() protoreflect.EnumDescriptor { + return file_template_manager_proto_enumTypes[0].Descriptor() +} + +func (TemplateBuildState) Type() protoreflect.EnumType { + return &file_template_manager_proto_enumTypes[0] +} + +func (x TemplateBuildState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TemplateBuildState.Descriptor instead. +func (TemplateBuildState) EnumDescriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{0} +} + +type HealthState int32 + +const ( + HealthState_Healthy HealthState = 0 + HealthState_Draining HealthState = 1 +) + +// Enum value maps for HealthState. +var ( + HealthState_name = map[int32]string{ + 0: "Healthy", + 1: "Draining", + } + HealthState_value = map[string]int32{ + "Healthy": 0, + "Draining": 1, + } +) + +func (x HealthState) Enum() *HealthState { + p := new(HealthState) + *p = x + return p +} + +func (x HealthState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthState) Descriptor() protoreflect.EnumDescriptor { + return file_template_manager_proto_enumTypes[1].Descriptor() +} + +func (HealthState) Type() protoreflect.EnumType { + return &file_template_manager_proto_enumTypes[1] +} + +func (x HealthState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthState.Descriptor instead. 
+func (HealthState) EnumDescriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{1} +} + type TemplateConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - TemplateID string `protobuf:"bytes,1,opt,name=templateID,proto3" json:"templateID,omitempty"` - BuildID string `protobuf:"bytes,2,opt,name=buildID,proto3" json:"buildID,omitempty"` - MemoryMB int32 `protobuf:"varint,3,opt,name=memoryMB,proto3" json:"memoryMB,omitempty"` - VCpuCount int32 `protobuf:"varint,4,opt,name=vCpuCount,proto3" json:"vCpuCount,omitempty"` - DiskSizeMB int32 `protobuf:"varint,5,opt,name=diskSizeMB,proto3" json:"diskSizeMB,omitempty"` - KernelVersion string `protobuf:"bytes,6,opt,name=kernelVersion,proto3" json:"kernelVersion,omitempty"` - FirecrackerVersion string `protobuf:"bytes,7,opt,name=firecrackerVersion,proto3" json:"firecrackerVersion,omitempty"` - StartCommand string `protobuf:"bytes,8,opt,name=startCommand,proto3" json:"startCommand,omitempty"` - HugePages bool `protobuf:"varint,9,opt,name=hugePages,proto3" json:"hugePages,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TemplateID string `protobuf:"bytes,1,opt,name=templateID,proto3" json:"templateID,omitempty"` + BuildID string `protobuf:"bytes,2,opt,name=buildID,proto3" json:"buildID,omitempty"` + MemoryMB int32 `protobuf:"varint,3,opt,name=memoryMB,proto3" json:"memoryMB,omitempty"` + VCpuCount int32 `protobuf:"varint,4,opt,name=vCpuCount,proto3" json:"vCpuCount,omitempty"` + DiskSizeMB int32 `protobuf:"varint,5,opt,name=diskSizeMB,proto3" json:"diskSizeMB,omitempty"` + KernelVersion string `protobuf:"bytes,6,opt,name=kernelVersion,proto3" json:"kernelVersion,omitempty"` + FirecrackerVersion string `protobuf:"bytes,7,opt,name=firecrackerVersion,proto3" json:"firecrackerVersion,omitempty"` + StartCommand string 
`protobuf:"bytes,8,opt,name=startCommand,proto3" json:"startCommand,omitempty"` + HugePages bool `protobuf:"varint,9,opt,name=hugePages,proto3" json:"hugePages,omitempty"` + ReadyCommand string `protobuf:"bytes,10,opt,name=readyCommand,proto3" json:"readyCommand,omitempty"` } func (x *TemplateConfig) Reset() { *x = TemplateConfig{} - mi := &file_template_manager_template_manager_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *TemplateConfig) String() string { @@ -51,8 +149,8 @@ func (x *TemplateConfig) String() string { func (*TemplateConfig) ProtoMessage() {} func (x *TemplateConfig) ProtoReflect() protoreflect.Message { - mi := &file_template_manager_template_manager_proto_msgTypes[0] - if x != nil { + mi := &file_template_manager_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -64,7 +162,7 @@ func (x *TemplateConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use TemplateConfig.ProtoReflect.Descriptor instead. 
func (*TemplateConfig) Descriptor() ([]byte, []int) { - return file_template_manager_template_manager_proto_rawDescGZIP(), []int{0} + return file_template_manager_proto_rawDescGZIP(), []int{0} } func (x *TemplateConfig) GetTemplateID() string { @@ -130,18 +228,28 @@ func (x *TemplateConfig) GetHugePages() bool { return false } +func (x *TemplateConfig) GetReadyCommand() string { + if x != nil { + return x.ReadyCommand + } + return "" +} + type TemplateCreateRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Template *TemplateConfig `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Template *TemplateConfig `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"` } func (x *TemplateCreateRequest) Reset() { *x = TemplateCreateRequest{} - mi := &file_template_manager_template_manager_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *TemplateCreateRequest) String() string { @@ -151,8 +259,8 @@ func (x *TemplateCreateRequest) String() string { func (*TemplateCreateRequest) ProtoMessage() {} func (x *TemplateCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_template_manager_template_manager_proto_msgTypes[1] - if x != nil { + mi := &file_template_manager_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -164,7 +272,7 @@ func (x *TemplateCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TemplateCreateRequest.ProtoReflect.Descriptor instead. 
func (*TemplateCreateRequest) Descriptor() ([]byte, []int) { - return file_template_manager_template_manager_proto_rawDescGZIP(), []int{1} + return file_template_manager_proto_rawDescGZIP(), []int{1} } func (x *TemplateCreateRequest) GetTemplate() *TemplateConfig { @@ -174,20 +282,78 @@ func (x *TemplateCreateRequest) GetTemplate() *TemplateConfig { return nil } +type TemplateStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TemplateID string `protobuf:"bytes,1,opt,name=templateID,proto3" json:"templateID,omitempty"` + BuildID string `protobuf:"bytes,2,opt,name=buildID,proto3" json:"buildID,omitempty"` +} + +func (x *TemplateStatusRequest) Reset() { + *x = TemplateStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemplateStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemplateStatusRequest) ProtoMessage() {} + +func (x *TemplateStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_template_manager_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemplateStatusRequest.ProtoReflect.Descriptor instead. +func (*TemplateStatusRequest) Descriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{2} +} + +func (x *TemplateStatusRequest) GetTemplateID() string { + if x != nil { + return x.TemplateID + } + return "" +} + +func (x *TemplateStatusRequest) GetBuildID() string { + if x != nil { + return x.BuildID + } + return "" +} + // Data required for deleting a template. 
type TemplateBuildDeleteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - BuildID string `protobuf:"bytes,1,opt,name=buildID,proto3" json:"buildID,omitempty"` - TemplateID string `protobuf:"bytes,2,opt,name=templateID,proto3" json:"templateID,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BuildID string `protobuf:"bytes,1,opt,name=buildID,proto3" json:"buildID,omitempty"` + TemplateID string `protobuf:"bytes,2,opt,name=templateID,proto3" json:"templateID,omitempty"` } func (x *TemplateBuildDeleteRequest) Reset() { *x = TemplateBuildDeleteRequest{} - mi := &file_template_manager_template_manager_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } func (x *TemplateBuildDeleteRequest) String() string { @@ -197,8 +363,8 @@ func (x *TemplateBuildDeleteRequest) String() string { func (*TemplateBuildDeleteRequest) ProtoMessage() {} func (x *TemplateBuildDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_template_manager_template_manager_proto_msgTypes[2] - if x != nil { + mi := &file_template_manager_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -210,7 +376,7 @@ func (x *TemplateBuildDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TemplateBuildDeleteRequest.ProtoReflect.Descriptor instead. 
func (*TemplateBuildDeleteRequest) Descriptor() ([]byte, []int) { - return file_template_manager_template_manager_proto_rawDescGZIP(), []int{2} + return file_template_manager_proto_rawDescGZIP(), []int{3} } func (x *TemplateBuildDeleteRequest) GetBuildID() string { @@ -227,30 +393,33 @@ func (x *TemplateBuildDeleteRequest) GetTemplateID() string { return "" } -// Logs from template build -type TemplateBuildLog struct { - state protoimpl.MessageState `protogen:"open.v1"` - Log string `protobuf:"bytes,1,opt,name=log,proto3" json:"log,omitempty"` - unknownFields protoimpl.UnknownFields +type TemplateBuildMetadata struct { + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RootfsSizeKey int32 `protobuf:"varint,1,opt,name=rootfsSizeKey,proto3" json:"rootfsSizeKey,omitempty"` + EnvdVersionKey string `protobuf:"bytes,2,opt,name=envdVersionKey,proto3" json:"envdVersionKey,omitempty"` } -func (x *TemplateBuildLog) Reset() { - *x = TemplateBuildLog{} - mi := &file_template_manager_template_manager_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *TemplateBuildMetadata) Reset() { + *x = TemplateBuildMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (x *TemplateBuildLog) String() string { +func (x *TemplateBuildMetadata) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TemplateBuildLog) ProtoMessage() {} +func (*TemplateBuildMetadata) ProtoMessage() {} -func (x *TemplateBuildLog) ProtoReflect() protoreflect.Message { - mi := &file_template_manager_template_manager_proto_msgTypes[3] - if x != nil { +func (x *TemplateBuildMetadata) ProtoReflect() protoreflect.Message { + mi := &file_template_manager_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,127 +429,376 @@ func (x *TemplateBuildLog) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TemplateBuildLog.ProtoReflect.Descriptor instead. -func (*TemplateBuildLog) Descriptor() ([]byte, []int) { - return file_template_manager_template_manager_proto_rawDescGZIP(), []int{3} +// Deprecated: Use TemplateBuildMetadata.ProtoReflect.Descriptor instead. +func (*TemplateBuildMetadata) Descriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{4} } -func (x *TemplateBuildLog) GetLog() string { +func (x *TemplateBuildMetadata) GetRootfsSizeKey() int32 { if x != nil { - return x.Log + return x.RootfsSizeKey + } + return 0 +} + +func (x *TemplateBuildMetadata) GetEnvdVersionKey() string { + if x != nil { + return x.EnvdVersionKey } return "" } -var File_template_manager_template_manager_proto protoreflect.FileDescriptor - -var file_template_manager_template_manager_proto_rawDesc = string([]byte{ - 0x0a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x2f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x02, 0x0a, 0x0e, 0x54, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x42, 
0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x42, 0x12, - 0x1c, 0x0a, 0x09, 0x76, 0x43, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x09, 0x76, 0x43, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, - 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x42, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x42, 0x12, 0x24, 0x0a, - 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x66, 0x69, 0x72, 0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x66, 0x69, 0x72, 0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x75, 0x67, 0x65, 0x50, - 0x61, 0x67, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x75, 0x67, 0x65, - 0x50, 0x61, 0x67, 0x65, 0x73, 0x22, 0x44, 0x0a, 0x15, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x56, 0x0a, 0x1a, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, - 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x49, 0x44, 0x22, 0x24, 0x0a, 0x10, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x32, 0x9c, 0x01, 0x0a, 0x0f, 0x54, 0x65, - 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3d, 0x0a, - 0x0e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, - 0x16, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4c, 0x6f, 0x67, 0x30, 0x01, 0x12, 0x4a, 0x0a, 0x13, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x12, 0x1b, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x33, 0x5a, 0x31, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, - 0x32, 0x62, 0x2d, 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2f, 0x74, 0x65, 0x6d, - 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) +// Logs from template build +type 
TemplateBuildStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status TemplateBuildState `protobuf:"varint,1,opt,name=status,proto3,enum=TemplateBuildState" json:"status,omitempty"` + Metadata *TemplateBuildMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *TemplateBuildStatusResponse) Reset() { + *x = TemplateBuildStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TemplateBuildStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TemplateBuildStatusResponse) ProtoMessage() {} + +func (x *TemplateBuildStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_template_manager_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TemplateBuildStatusResponse.ProtoReflect.Descriptor instead. 
+func (*TemplateBuildStatusResponse) Descriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{5} +} + +func (x *TemplateBuildStatusResponse) GetStatus() TemplateBuildState { + if x != nil { + return x.Status + } + return TemplateBuildState_Building +} + +func (x *TemplateBuildStatusResponse) GetMetadata() *TemplateBuildMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +type HealthStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthState `protobuf:"varint,1,opt,name=status,proto3,enum=HealthState" json:"status,omitempty"` +} + +func (x *HealthStatusResponse) Reset() { + *x = HealthStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_template_manager_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthStatusResponse) ProtoMessage() {} + +func (x *HealthStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_template_manager_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthStatusResponse.ProtoReflect.Descriptor instead. 
+func (*HealthStatusResponse) Descriptor() ([]byte, []int) { + return file_template_manager_proto_rawDescGZIP(), []int{6} +} + +func (x *HealthStatusResponse) GetStatus() HealthState { + if x != nil { + return x.Status + } + return HealthState_Healthy +} + +var File_template_manager_proto protoreflect.FileDescriptor + +var file_template_manager_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x02, 0x0a, 0x0e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x42, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4d, 0x42, 0x12, 0x1c, + 0x0a, 0x09, 0x76, 0x43, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x76, 0x43, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, + 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x42, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x4d, 0x42, 0x12, 0x24, 0x0a, 0x0d, + 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x66, 0x69, 0x72, 
0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, + 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x66, 0x69, 0x72, 0x65, 0x63, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x75, 0x67, 0x65, 0x50, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x75, 0x67, 0x65, 0x50, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x43, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x22, 0x44, 0x0a, 0x15, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2b, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x22, 0x51, + 0x0a, 0x15, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, + 0x44, 0x22, 0x56, 0x0a, 0x1a, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, + 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, + 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x49, 0x44, 0x22, 0x65, 0x0a, 0x15, 0x54, 0x65, 0x6d, + 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x53, 0x69, 0x7a, 0x65, + 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x72, 0x6f, 0x6f, 0x74, 0x66, + 0x73, 0x53, 0x69, 0x7a, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x65, 0x6e, 0x76, 0x64, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x65, 0x6e, 0x76, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x22, 0x7e, 0x0a, 0x1b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0x3c, 0x0a, 0x14, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x3d, + 0x0a, 0x12, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x10, 0x02, 0x2a, 0x28, 0x0a, + 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x72, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x32, 0xab, 0x02, 0x0a, 0x0f, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x2e, + 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4b, 0x0a, + 0x13, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x54, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x1b, 0x2e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 
0x65, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x33, 0x5a, 0x31, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x32, 0x62, 0x2d, + 0x64, 0x65, 0x76, 0x2f, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, + 0x74, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} var ( - file_template_manager_template_manager_proto_rawDescOnce sync.Once - file_template_manager_template_manager_proto_rawDescData []byte + file_template_manager_proto_rawDescOnce sync.Once + file_template_manager_proto_rawDescData = file_template_manager_proto_rawDesc ) -func file_template_manager_template_manager_proto_rawDescGZIP() []byte { - file_template_manager_template_manager_proto_rawDescOnce.Do(func() { - file_template_manager_template_manager_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_template_manager_template_manager_proto_rawDesc), len(file_template_manager_template_manager_proto_rawDesc))) +func file_template_manager_proto_rawDescGZIP() []byte { + file_template_manager_proto_rawDescOnce.Do(func() { + file_template_manager_proto_rawDescData = protoimpl.X.CompressGZIP(file_template_manager_proto_rawDescData) }) - return file_template_manager_template_manager_proto_rawDescData -} - -var 
file_template_manager_template_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_template_manager_template_manager_proto_goTypes = []any{ - (*TemplateConfig)(nil), // 0: TemplateConfig - (*TemplateCreateRequest)(nil), // 1: TemplateCreateRequest - (*TemplateBuildDeleteRequest)(nil), // 2: TemplateBuildDeleteRequest - (*TemplateBuildLog)(nil), // 3: TemplateBuildLog - (*emptypb.Empty)(nil), // 4: google.protobuf.Empty -} -var file_template_manager_template_manager_proto_depIdxs = []int32{ - 0, // 0: TemplateCreateRequest.template:type_name -> TemplateConfig - 1, // 1: TemplateService.TemplateCreate:input_type -> TemplateCreateRequest - 2, // 2: TemplateService.TemplateBuildDelete:input_type -> TemplateBuildDeleteRequest - 3, // 3: TemplateService.TemplateCreate:output_type -> TemplateBuildLog - 4, // 4: TemplateService.TemplateBuildDelete:output_type -> google.protobuf.Empty - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_template_manager_template_manager_proto_init() } -func file_template_manager_template_manager_proto_init() { - if File_template_manager_template_manager_proto != nil { + return file_template_manager_proto_rawDescData +} + +var file_template_manager_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_template_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_template_manager_proto_goTypes = []interface{}{ + (TemplateBuildState)(0), // 0: TemplateBuildState + (HealthState)(0), // 1: HealthState + (*TemplateConfig)(nil), // 2: TemplateConfig + (*TemplateCreateRequest)(nil), // 3: TemplateCreateRequest + (*TemplateStatusRequest)(nil), // 4: TemplateStatusRequest + (*TemplateBuildDeleteRequest)(nil), // 5: TemplateBuildDeleteRequest + (*TemplateBuildMetadata)(nil), // 6: 
TemplateBuildMetadata + (*TemplateBuildStatusResponse)(nil), // 7: TemplateBuildStatusResponse + (*HealthStatusResponse)(nil), // 8: HealthStatusResponse + (*emptypb.Empty)(nil), // 9: google.protobuf.Empty +} +var file_template_manager_proto_depIdxs = []int32{ + 2, // 0: TemplateCreateRequest.template:type_name -> TemplateConfig + 0, // 1: TemplateBuildStatusResponse.status:type_name -> TemplateBuildState + 6, // 2: TemplateBuildStatusResponse.metadata:type_name -> TemplateBuildMetadata + 1, // 3: HealthStatusResponse.status:type_name -> HealthState + 3, // 4: TemplateService.TemplateCreate:input_type -> TemplateCreateRequest + 4, // 5: TemplateService.TemplateBuildStatus:input_type -> TemplateStatusRequest + 5, // 6: TemplateService.TemplateBuildDelete:input_type -> TemplateBuildDeleteRequest + 9, // 7: TemplateService.HealthStatus:input_type -> google.protobuf.Empty + 9, // 8: TemplateService.TemplateCreate:output_type -> google.protobuf.Empty + 7, // 9: TemplateService.TemplateBuildStatus:output_type -> TemplateBuildStatusResponse + 9, // 10: TemplateService.TemplateBuildDelete:output_type -> google.protobuf.Empty + 8, // 11: TemplateService.HealthStatus:output_type -> HealthStatusResponse + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_template_manager_proto_init() } +func file_template_manager_proto_init() { + if File_template_manager_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_template_manager_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemplateConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[1].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*TemplateCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemplateStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemplateBuildDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemplateBuildMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TemplateBuildStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_template_manager_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HealthStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_template_manager_template_manager_proto_rawDesc), len(file_template_manager_template_manager_proto_rawDesc)), - NumEnums: 0, - NumMessages: 4, + RawDescriptor: file_template_manager_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, NumExtensions: 0, NumServices: 1, }, - GoTypes: 
file_template_manager_template_manager_proto_goTypes, - DependencyIndexes: file_template_manager_template_manager_proto_depIdxs, - MessageInfos: file_template_manager_template_manager_proto_msgTypes, + GoTypes: file_template_manager_proto_goTypes, + DependencyIndexes: file_template_manager_proto_depIdxs, + EnumInfos: file_template_manager_proto_enumTypes, + MessageInfos: file_template_manager_proto_msgTypes, }.Build() - File_template_manager_template_manager_proto = out.File - file_template_manager_template_manager_proto_goTypes = nil - file_template_manager_template_manager_proto_depIdxs = nil + File_template_manager_proto = out.File + file_template_manager_proto_rawDesc = nil + file_template_manager_proto_goTypes = nil + file_template_manager_proto_depIdxs = nil } diff --git a/packages/shared/pkg/grpc/template-manager/template-manager_grpc.pb.go b/packages/shared/pkg/grpc/template-manager/template-manager_grpc.pb.go index 1734c27..978764a 100644 --- a/packages/shared/pkg/grpc/template-manager/template-manager_grpc.pb.go +++ b/packages/shared/pkg/grpc/template-manager/template-manager_grpc.pb.go @@ -1,8 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v6.30.1 -// source: template-manager/template-manager.proto +// - protoc-gen-go-grpc v1.2.0 +// - protoc v5.29.3 +// source: template-manager.proto package template_manager @@ -16,24 +16,21 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. -const _ = grpc.SupportPackageIsVersion9 - -const ( - TemplateService_TemplateCreate_FullMethodName = "/TemplateService/TemplateCreate" - TemplateService_TemplateBuildDelete_FullMethodName = "/TemplateService/TemplateBuildDelete" -) +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 // TemplateServiceClient is the client API for TemplateService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -// -// Interface exported by the server. type TemplateServiceClient interface { // TemplateCreate is a gRPC service that creates a new template - TemplateCreate(ctx context.Context, in *TemplateCreateRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TemplateBuildLog], error) + TemplateCreate(ctx context.Context, in *TemplateCreateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // TemplateStatus is a gRPC service that streams the status of a template build + TemplateBuildStatus(ctx context.Context, in *TemplateStatusRequest, opts ...grpc.CallOption) (*TemplateBuildStatusResponse, error) // TemplateBuildDelete is a gRPC service that deletes files associated with a template build TemplateBuildDelete(ctx context.Context, in *TemplateBuildDeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // todo (2025-05): this is deprecated, please use InfoService that is used for both orchestrator and template manager + HealthStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HealthStatusResponse, error) } type templateServiceClient struct { @@ -44,29 +41,36 @@ func NewTemplateServiceClient(cc grpc.ClientConnInterface) TemplateServiceClient return &templateServiceClient{cc} } -func (c *templateServiceClient) TemplateCreate(ctx context.Context, in *TemplateCreateRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[TemplateBuildLog], error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &TemplateService_ServiceDesc.Streams[0], TemplateService_TemplateCreate_FullMethodName, cOpts...) 
+func (c *templateServiceClient) TemplateCreate(ctx context.Context, in *TemplateCreateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/TemplateService/TemplateCreate", in, out, opts...) if err != nil { return nil, err } - x := &grpc.GenericClientStream[TemplateCreateRequest, TemplateBuildLog]{ClientStream: stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { + return out, nil +} + +func (c *templateServiceClient) TemplateBuildStatus(ctx context.Context, in *TemplateStatusRequest, opts ...grpc.CallOption) (*TemplateBuildStatusResponse, error) { + out := new(TemplateBuildStatusResponse) + err := c.cc.Invoke(ctx, "/TemplateService/TemplateBuildStatus", in, out, opts...) + if err != nil { return nil, err } - return x, nil + return out, nil } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. -type TemplateService_TemplateCreateClient = grpc.ServerStreamingClient[TemplateBuildLog] - func (c *templateServiceClient) TemplateBuildDelete(ctx context.Context, in *TemplateBuildDeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, TemplateService_TemplateBuildDelete_FullMethodName, in, out, cOpts...) + err := c.cc.Invoke(ctx, "/TemplateService/TemplateBuildDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *templateServiceClient) HealthStatus(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HealthStatusResponse, error) { + out := new(HealthStatusResponse) + err := c.cc.Invoke(ctx, "/TemplateService/HealthStatus", in, out, opts...) 
if err != nil { return nil, err } @@ -75,32 +79,36 @@ func (c *templateServiceClient) TemplateBuildDelete(ctx context.Context, in *Tem // TemplateServiceServer is the server API for TemplateService service. // All implementations must embed UnimplementedTemplateServiceServer -// for forward compatibility. -// -// Interface exported by the server. +// for forward compatibility type TemplateServiceServer interface { // TemplateCreate is a gRPC service that creates a new template - TemplateCreate(*TemplateCreateRequest, grpc.ServerStreamingServer[TemplateBuildLog]) error + TemplateCreate(context.Context, *TemplateCreateRequest) (*emptypb.Empty, error) + // TemplateStatus is a gRPC service that streams the status of a template build + TemplateBuildStatus(context.Context, *TemplateStatusRequest) (*TemplateBuildStatusResponse, error) // TemplateBuildDelete is a gRPC service that deletes files associated with a template build TemplateBuildDelete(context.Context, *TemplateBuildDeleteRequest) (*emptypb.Empty, error) + // todo (2025-05): this is deprecated, please use InfoService that is used for both orchestrator and template manager + HealthStatus(context.Context, *emptypb.Empty) (*HealthStatusResponse, error) mustEmbedUnimplementedTemplateServiceServer() } -// UnimplementedTemplateServiceServer must be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedTemplateServiceServer struct{} +// UnimplementedTemplateServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedTemplateServiceServer struct { +} -func (UnimplementedTemplateServiceServer) TemplateCreate(*TemplateCreateRequest, grpc.ServerStreamingServer[TemplateBuildLog]) error { - return status.Errorf(codes.Unimplemented, "method TemplateCreate not implemented") +func (UnimplementedTemplateServiceServer) TemplateCreate(context.Context, *TemplateCreateRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method TemplateCreate not implemented") +} +func (UnimplementedTemplateServiceServer) TemplateBuildStatus(context.Context, *TemplateStatusRequest) (*TemplateBuildStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TemplateBuildStatus not implemented") } func (UnimplementedTemplateServiceServer) TemplateBuildDelete(context.Context, *TemplateBuildDeleteRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method TemplateBuildDelete not implemented") } +func (UnimplementedTemplateServiceServer) HealthStatus(context.Context, *emptypb.Empty) (*HealthStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HealthStatus not implemented") +} func (UnimplementedTemplateServiceServer) mustEmbedUnimplementedTemplateServiceServer() {} -func (UnimplementedTemplateServiceServer) testEmbeddedByValue() {} // UnsafeTemplateServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to TemplateServiceServer will @@ -110,26 +118,44 @@ type UnsafeTemplateServiceServer interface { } func RegisterTemplateServiceServer(s grpc.ServiceRegistrar, srv TemplateServiceServer) { - // If the following call pancis, it indicates UnimplementedTemplateServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. 
- if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } s.RegisterService(&TemplateService_ServiceDesc, srv) } -func _TemplateService_TemplateCreate_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(TemplateCreateRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func _TemplateService_TemplateCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TemplateCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemplateServiceServer).TemplateCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/TemplateService/TemplateCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemplateServiceServer).TemplateCreate(ctx, req.(*TemplateCreateRequest)) } - return srv.(TemplateServiceServer).TemplateCreate(m, &grpc.GenericServerStream[TemplateCreateRequest, TemplateBuildLog]{ServerStream: stream}) + return interceptor(ctx, in, info, handler) } -// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
-type TemplateService_TemplateCreateServer = grpc.ServerStreamingServer[TemplateBuildLog] +func _TemplateService_TemplateBuildStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TemplateStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemplateServiceServer).TemplateBuildStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/TemplateService/TemplateBuildStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemplateServiceServer).TemplateBuildStatus(ctx, req.(*TemplateStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} func _TemplateService_TemplateBuildDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(TemplateBuildDeleteRequest) @@ -141,7 +167,7 @@ func _TemplateService_TemplateBuildDelete_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: TemplateService_TemplateBuildDelete_FullMethodName, + FullMethod: "/TemplateService/TemplateBuildDelete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TemplateServiceServer).TemplateBuildDelete(ctx, req.(*TemplateBuildDeleteRequest)) @@ -149,6 +175,24 @@ func _TemplateService_TemplateBuildDelete_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _TemplateService_HealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TemplateServiceServer).HealthStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/TemplateService/HealthStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TemplateServiceServer).HealthStatus(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + // TemplateService_ServiceDesc is the grpc.ServiceDesc for TemplateService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -156,17 +200,23 @@ var TemplateService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "TemplateService", HandlerType: (*TemplateServiceServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "TemplateCreate", + Handler: _TemplateService_TemplateCreate_Handler, + }, + { + MethodName: "TemplateBuildStatus", + Handler: _TemplateService_TemplateBuildStatus_Handler, + }, { MethodName: "TemplateBuildDelete", Handler: _TemplateService_TemplateBuildDelete_Handler, }, - }, - Streams: []grpc.StreamDesc{ { - StreamName: "TemplateCreate", - Handler: _TemplateService_TemplateCreate_Handler, - ServerStreams: true, + MethodName: "HealthStatus", + Handler: _TemplateService_HealthStatus_Handler, }, }, - Metadata: "template-manager/template-manager.proto", + Streams: []grpc.StreamDesc{}, + Metadata: "template-manager.proto", } diff --git a/packages/shared/pkg/health/main.go b/packages/shared/pkg/health/main.go new file mode 100644 index 0000000..72d975c --- /dev/null +++ b/packages/shared/pkg/health/main.go @@ -0,0 +1,14 @@ +package health + +type Status string + +const ( + Healthy Status = "healthy" + Unhealthy Status = "unhealthy" + Draining Status = "draining" +) + +type Response struct { + Status Status `json:"status"` + Version string `json:"version"` +} diff --git a/packages/shared/pkg/http/edge/api.gen.go b/packages/shared/pkg/http/edge/api.gen.go new file mode 100644 index 0000000..7bea2d4 --- /dev/null +++ b/packages/shared/pkg/http/edge/api.gen.go @@ -0,0 +1,329 @@ +// Package api provides primitives to 
interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. +package api + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + "github.com/oapi-codegen/runtime" +) + +// ServerInterface represents all server handlers. +type ServerInterface interface { + + // (GET /health) + HealthCheck(c *gin.Context) + + // (GET /health/machine) + HealthCheckMachine(c *gin.Context) + + // (GET /health/traffic) + HealthCheckTraffic(c *gin.Context) + + // (GET /v1/info) + V1Info(c *gin.Context) + // Delete a sandbox catalog entry + // (DELETE /v1/sandboxes/catalog) + V1SandboxCatalogDelete(c *gin.Context) + // Create a sandbox catalog entry + // (POST /v1/sandboxes/catalog) + V1SandboxCatalogCreate(c *gin.Context) + + // (GET /v1/service-discovery/nodes) + V1ServiceDiscoveryNodes(c *gin.Context) + // Get the orchestrators + // (GET /v1/service-discovery/nodes/orchestrators) + V1ServiceDiscoveryGetOrchestrators(c *gin.Context) + + // (POST /v1/service-discovery/nodes/{nodeID}/drain) + V1ServiceDiscoveryNodeDrain(c *gin.Context, nodeID string) + + // (POST /v1/service-discovery/nodes/{nodeID}/kill) + V1ServiceDiscoveryNodeKill(c *gin.Context, nodeID string) + // Template build logs + // (GET /v1/templates/builds/{buildID}/logs) + V1TemplateBuildLogs(c *gin.Context, buildID string, params V1TemplateBuildLogsParams) +} + +// ServerInterfaceWrapper converts contexts to parameters. 
+type ServerInterfaceWrapper struct { + Handler ServerInterface + HandlerMiddlewares []MiddlewareFunc + ErrorHandler func(*gin.Context, error, int) +} + +type MiddlewareFunc func(c *gin.Context) + +// HealthCheck operation middleware +func (siw *ServerInterfaceWrapper) HealthCheck(c *gin.Context) { + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.HealthCheck(c) +} + +// HealthCheckMachine operation middleware +func (siw *ServerInterfaceWrapper) HealthCheckMachine(c *gin.Context) { + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.HealthCheckMachine(c) +} + +// HealthCheckTraffic operation middleware +func (siw *ServerInterfaceWrapper) HealthCheckTraffic(c *gin.Context) { + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.HealthCheckTraffic(c) +} + +// V1Info operation middleware +func (siw *ServerInterfaceWrapper) V1Info(c *gin.Context) { + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1Info(c) +} + +// V1SandboxCatalogDelete operation middleware +func (siw *ServerInterfaceWrapper) V1SandboxCatalogDelete(c *gin.Context) { + + c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1SandboxCatalogDelete(c) +} + +// V1SandboxCatalogCreate operation middleware +func (siw *ServerInterfaceWrapper) V1SandboxCatalogCreate(c *gin.Context) { + + c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1SandboxCatalogCreate(c) +} + +// V1ServiceDiscoveryNodes operation middleware +func (siw *ServerInterfaceWrapper) V1ServiceDiscoveryNodes(c *gin.Context) { + + 
c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1ServiceDiscoveryNodes(c) +} + +// V1ServiceDiscoveryGetOrchestrators operation middleware +func (siw *ServerInterfaceWrapper) V1ServiceDiscoveryGetOrchestrators(c *gin.Context) { + + c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1ServiceDiscoveryGetOrchestrators(c) +} + +// V1ServiceDiscoveryNodeDrain operation middleware +func (siw *ServerInterfaceWrapper) V1ServiceDiscoveryNodeDrain(c *gin.Context) { + + var err error + + // ------------- Path parameter "nodeID" ------------- + var nodeID string + + err = runtime.BindStyledParameterWithOptions("simple", "nodeID", c.Param("nodeID"), &nodeID, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter nodeID: %w", err), http.StatusBadRequest) + return + } + + c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1ServiceDiscoveryNodeDrain(c, nodeID) +} + +// V1ServiceDiscoveryNodeKill operation middleware +func (siw *ServerInterfaceWrapper) V1ServiceDiscoveryNodeKill(c *gin.Context) { + + var err error + + // ------------- Path parameter "nodeID" ------------- + var nodeID string + + err = runtime.BindStyledParameterWithOptions("simple", "nodeID", c.Param("nodeID"), &nodeID, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter nodeID: %w", err), http.StatusBadRequest) + return + } + + c.Set(ApiKeyAuthScopes, []string{}) + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + 
siw.Handler.V1ServiceDiscoveryNodeKill(c, nodeID) +} + +// V1TemplateBuildLogs operation middleware +func (siw *ServerInterfaceWrapper) V1TemplateBuildLogs(c *gin.Context) { + + var err error + + // ------------- Path parameter "buildID" ------------- + var buildID string + + err = runtime.BindStyledParameterWithOptions("simple", "buildID", c.Param("buildID"), &buildID, runtime.BindStyledParameterOptions{Explode: false, Required: true}) + if err != nil { + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter buildID: %w", err), http.StatusBadRequest) + return + } + + c.Set(ApiKeyAuthScopes, []string{}) + + // Parameter object where we will unmarshal all parameters from the context + var params V1TemplateBuildLogsParams + + // ------------- Required query parameter "orchestratorID" ------------- + + if paramValue := c.Query("orchestratorID"); paramValue != "" { + + } else { + siw.ErrorHandler(c, fmt.Errorf("Query argument orchestratorID is required, but not found"), http.StatusBadRequest) + return + } + + err = runtime.BindQueryParameter("form", true, true, "orchestratorID", c.Request.URL.Query(), ¶ms.OrchestratorID) + if err != nil { + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter orchestratorID: %w", err), http.StatusBadRequest) + return + } + + // ------------- Required query parameter "templateID" ------------- + + if paramValue := c.Query("templateID"); paramValue != "" { + + } else { + siw.ErrorHandler(c, fmt.Errorf("Query argument templateID is required, but not found"), http.StatusBadRequest) + return + } + + err = runtime.BindQueryParameter("form", true, true, "templateID", c.Request.URL.Query(), ¶ms.TemplateID) + if err != nil { + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter templateID: %w", err), http.StatusBadRequest) + return + } + + // ------------- Optional query parameter "offset" ------------- + + err = runtime.BindQueryParameter("form", true, false, "offset", c.Request.URL.Query(), ¶ms.Offset) + if err != nil 
{ + siw.ErrorHandler(c, fmt.Errorf("Invalid format for parameter offset: %w", err), http.StatusBadRequest) + return + } + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + if c.IsAborted() { + return + } + } + + siw.Handler.V1TemplateBuildLogs(c, buildID, params) +} + +// GinServerOptions provides options for the Gin server. +type GinServerOptions struct { + BaseURL string + Middlewares []MiddlewareFunc + ErrorHandler func(*gin.Context, error, int) +} + +// RegisterHandlers creates http.Handler with routing matching OpenAPI spec. +func RegisterHandlers(router gin.IRouter, si ServerInterface) { + RegisterHandlersWithOptions(router, si, GinServerOptions{}) +} + +// RegisterHandlersWithOptions creates http.Handler with additional options +func RegisterHandlersWithOptions(router gin.IRouter, si ServerInterface, options GinServerOptions) { + errorHandler := options.ErrorHandler + if errorHandler == nil { + errorHandler = func(c *gin.Context, err error, statusCode int) { + c.JSON(statusCode, gin.H{"msg": err.Error()}) + } + } + + wrapper := ServerInterfaceWrapper{ + Handler: si, + HandlerMiddlewares: options.Middlewares, + ErrorHandler: errorHandler, + } + + router.GET(options.BaseURL+"/health", wrapper.HealthCheck) + router.GET(options.BaseURL+"/health/machine", wrapper.HealthCheckMachine) + router.GET(options.BaseURL+"/health/traffic", wrapper.HealthCheckTraffic) + router.GET(options.BaseURL+"/v1/info", wrapper.V1Info) + router.DELETE(options.BaseURL+"/v1/sandboxes/catalog", wrapper.V1SandboxCatalogDelete) + router.POST(options.BaseURL+"/v1/sandboxes/catalog", wrapper.V1SandboxCatalogCreate) + router.GET(options.BaseURL+"/v1/service-discovery/nodes", wrapper.V1ServiceDiscoveryNodes) + router.GET(options.BaseURL+"/v1/service-discovery/nodes/orchestrators", wrapper.V1ServiceDiscoveryGetOrchestrators) + router.POST(options.BaseURL+"/v1/service-discovery/nodes/:nodeID/drain", wrapper.V1ServiceDiscoveryNodeDrain) + 
router.POST(options.BaseURL+"/v1/service-discovery/nodes/:nodeID/kill", wrapper.V1ServiceDiscoveryNodeKill) + router.GET(options.BaseURL+"/v1/templates/builds/:buildID/logs", wrapper.V1TemplateBuildLogs) +} diff --git a/packages/shared/pkg/http/edge/client.gen.go b/packages/shared/pkg/http/edge/client.gen.go new file mode 100644 index 0000000..9ab4049 --- /dev/null +++ b/packages/shared/pkg/http/edge/client.gen.go @@ -0,0 +1,1517 @@ +// Package api provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/oapi-codegen/runtime" +) + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. +type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. 
+ RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. 
+type ClientInterface interface { + // HealthCheck request + HealthCheck(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // HealthCheckMachine request + HealthCheckMachine(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // HealthCheckTraffic request + HealthCheckTraffic(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1Info request + V1Info(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1SandboxCatalogDeleteWithBody request with any body + V1SandboxCatalogDeleteWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + V1SandboxCatalogDelete(ctx context.Context, body V1SandboxCatalogDeleteJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1SandboxCatalogCreateWithBody request with any body + V1SandboxCatalogCreateWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + V1SandboxCatalogCreate(ctx context.Context, body V1SandboxCatalogCreateJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1ServiceDiscoveryNodes request + V1ServiceDiscoveryNodes(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1ServiceDiscoveryGetOrchestrators request + V1ServiceDiscoveryGetOrchestrators(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1ServiceDiscoveryNodeDrain request + V1ServiceDiscoveryNodeDrain(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1ServiceDiscoveryNodeKill request + V1ServiceDiscoveryNodeKill(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*http.Response, error) + + // V1TemplateBuildLogs request + V1TemplateBuildLogs(ctx context.Context, buildID string, params *V1TemplateBuildLogsParams, reqEditors 
...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) HealthCheck(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewHealthCheckRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) HealthCheckMachine(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewHealthCheckMachineRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) HealthCheckTraffic(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewHealthCheckTrafficRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1Info(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1InfoRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1SandboxCatalogDeleteWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1SandboxCatalogDeleteRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1SandboxCatalogDelete(ctx context.Context, body V1SandboxCatalogDeleteJSONRequestBody, reqEditors ...RequestEditorFn) 
(*http.Response, error) { + req, err := NewV1SandboxCatalogDeleteRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1SandboxCatalogCreateWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1SandboxCatalogCreateRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1SandboxCatalogCreate(ctx context.Context, body V1SandboxCatalogCreateJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1SandboxCatalogCreateRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1ServiceDiscoveryNodes(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1ServiceDiscoveryNodesRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1ServiceDiscoveryGetOrchestrators(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1ServiceDiscoveryGetOrchestratorsRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1ServiceDiscoveryNodeDrain(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*http.Response, 
error) { + req, err := NewV1ServiceDiscoveryNodeDrainRequest(c.Server, nodeID) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1ServiceDiscoveryNodeKill(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1ServiceDiscoveryNodeKillRequest(c.Server, nodeID) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) V1TemplateBuildLogs(ctx context.Context, buildID string, params *V1TemplateBuildLogsParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewV1TemplateBuildLogsRequest(c.Server, buildID, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewHealthCheckRequest generates requests for HealthCheck +func NewHealthCheckRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/health") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewHealthCheckMachineRequest generates requests for HealthCheckMachine +func NewHealthCheckMachineRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/health/machine") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewHealthCheckTrafficRequest generates requests for HealthCheckTraffic +func NewHealthCheckTrafficRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/health/traffic") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1InfoRequest generates requests for V1Info +func NewV1InfoRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/info") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1SandboxCatalogDeleteRequest calls the generic V1SandboxCatalogDelete builder with application/json body +func NewV1SandboxCatalogDeleteRequest(server string, body V1SandboxCatalogDeleteJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewV1SandboxCatalogDeleteRequestWithBody(server, "application/json", bodyReader) +} + +// NewV1SandboxCatalogDeleteRequestWithBody generates requests for V1SandboxCatalogDelete with any type of body +func NewV1SandboxCatalogDeleteRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/sandboxes/catalog") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewV1SandboxCatalogCreateRequest calls the generic V1SandboxCatalogCreate builder with application/json body +func NewV1SandboxCatalogCreateRequest(server string, body V1SandboxCatalogCreateJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewV1SandboxCatalogCreateRequestWithBody(server, "application/json", bodyReader) +} + +// NewV1SandboxCatalogCreateRequestWithBody generates requests for V1SandboxCatalogCreate with any type of body +func NewV1SandboxCatalogCreateRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/sandboxes/catalog") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewV1ServiceDiscoveryNodesRequest generates requests for V1ServiceDiscoveryNodes +func NewV1ServiceDiscoveryNodesRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/service-discovery/nodes") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1ServiceDiscoveryGetOrchestratorsRequest generates requests for V1ServiceDiscoveryGetOrchestrators +func NewV1ServiceDiscoveryGetOrchestratorsRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/service-discovery/nodes/orchestrators") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1ServiceDiscoveryNodeDrainRequest generates requests for V1ServiceDiscoveryNodeDrain +func NewV1ServiceDiscoveryNodeDrainRequest(server string, nodeID string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "nodeID", runtime.ParamLocationPath, nodeID) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/service-discovery/nodes/%s/drain", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1ServiceDiscoveryNodeKillRequest generates requests for V1ServiceDiscoveryNodeKill +func NewV1ServiceDiscoveryNodeKillRequest(server string, nodeID string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "nodeID", runtime.ParamLocationPath, nodeID) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/service-discovery/nodes/%s/kill", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewV1TemplateBuildLogsRequest generates requests for V1TemplateBuildLogs +func NewV1TemplateBuildLogsRequest(server string, buildID string, params *V1TemplateBuildLogsParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "buildID", runtime.ParamLocationPath, buildID) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/v1/templates/builds/%s/logs", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + if params != nil { + queryValues := queryURL.Query() + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "orchestratorID", runtime.ParamLocationQuery, params.OrchestratorID); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "templateID", runtime.ParamLocationQuery, params.TemplateID); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + if params.Offset != nil { + + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "offset", runtime.ParamLocationQuery, *params.Offset); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + + } + + queryURL.RawQuery = queryValues.Encode() + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return 
type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. +type ClientWithResponsesInterface interface { + // HealthCheckWithResponse request + HealthCheckWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckResponse, error) + + // HealthCheckMachineWithResponse request + HealthCheckMachineWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckMachineResponse, error) + + // HealthCheckTrafficWithResponse request + HealthCheckTrafficWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckTrafficResponse, error) + + // V1InfoWithResponse request + V1InfoWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1InfoResponse, error) + + // V1SandboxCatalogDeleteWithBodyWithResponse request with any body + V1SandboxCatalogDeleteWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1SandboxCatalogDeleteResponse, error) + + V1SandboxCatalogDeleteWithResponse(ctx context.Context, body V1SandboxCatalogDeleteJSONRequestBody, reqEditors ...RequestEditorFn) (*V1SandboxCatalogDeleteResponse, error) + + // V1SandboxCatalogCreateWithBodyWithResponse request with any body + V1SandboxCatalogCreateWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1SandboxCatalogCreateResponse, error) + + V1SandboxCatalogCreateWithResponse(ctx context.Context, body 
V1SandboxCatalogCreateJSONRequestBody, reqEditors ...RequestEditorFn) (*V1SandboxCatalogCreateResponse, error) + + // V1ServiceDiscoveryNodesWithResponse request + V1ServiceDiscoveryNodesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodesResponse, error) + + // V1ServiceDiscoveryGetOrchestratorsWithResponse request + V1ServiceDiscoveryGetOrchestratorsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryGetOrchestratorsResponse, error) + + // V1ServiceDiscoveryNodeDrainWithResponse request + V1ServiceDiscoveryNodeDrainWithResponse(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodeDrainResponse, error) + + // V1ServiceDiscoveryNodeKillWithResponse request + V1ServiceDiscoveryNodeKillWithResponse(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodeKillResponse, error) + + // V1TemplateBuildLogsWithResponse request + V1TemplateBuildLogsWithResponse(ctx context.Context, buildID string, params *V1TemplateBuildLogsParams, reqEditors ...RequestEditorFn) (*V1TemplateBuildLogsResponse, error) +} + +type HealthCheckResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r HealthCheckResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r HealthCheckResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type HealthCheckMachineResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r HealthCheckMachineResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r HealthCheckMachineResponse) StatusCode() int { + if 
r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type HealthCheckTrafficResponse struct { + Body []byte + HTTPResponse *http.Response +} + +// Status returns HTTPResponse.Status +func (r HealthCheckTrafficResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r HealthCheckTrafficResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1InfoResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *ClusterNodeInfo + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1InfoResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1InfoResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1SandboxCatalogDeleteResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *N400 + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1SandboxCatalogDeleteResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1SandboxCatalogDeleteResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1SandboxCatalogCreateResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *N400 + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1SandboxCatalogCreateResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1SandboxCatalogCreateResponse) 
StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1ServiceDiscoveryNodesResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]ClusterNode + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1ServiceDiscoveryNodesResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1ServiceDiscoveryNodesResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1ServiceDiscoveryGetOrchestratorsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]ClusterOrchestratorNode + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1ServiceDiscoveryGetOrchestratorsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1ServiceDiscoveryGetOrchestratorsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1ServiceDiscoveryNodeDrainResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *N400 + JSON401 *N401 + JSON404 *N404 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1ServiceDiscoveryNodeDrainResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1ServiceDiscoveryNodeDrainResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1ServiceDiscoveryNodeKillResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *N400 + JSON401 *N401 + JSON404 *N404 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func 
(r V1ServiceDiscoveryNodeKillResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1ServiceDiscoveryNodeKillResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type V1TemplateBuildLogsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *TemplateBuildLogsResponse + JSON400 *N400 + JSON401 *N401 + JSON500 *N500 +} + +// Status returns HTTPResponse.Status +func (r V1TemplateBuildLogsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r V1TemplateBuildLogsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// HealthCheckWithResponse request returning *HealthCheckResponse +func (c *ClientWithResponses) HealthCheckWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckResponse, error) { + rsp, err := c.HealthCheck(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseHealthCheckResponse(rsp) +} + +// HealthCheckMachineWithResponse request returning *HealthCheckMachineResponse +func (c *ClientWithResponses) HealthCheckMachineWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckMachineResponse, error) { + rsp, err := c.HealthCheckMachine(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseHealthCheckMachineResponse(rsp) +} + +// HealthCheckTrafficWithResponse request returning *HealthCheckTrafficResponse +func (c *ClientWithResponses) HealthCheckTrafficWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*HealthCheckTrafficResponse, error) { + rsp, err := c.HealthCheckTraffic(ctx, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseHealthCheckTrafficResponse(rsp) +} + +// V1InfoWithResponse request returning *V1InfoResponse +func (c *ClientWithResponses) V1InfoWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1InfoResponse, error) { + rsp, err := c.V1Info(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1InfoResponse(rsp) +} + +// V1SandboxCatalogDeleteWithBodyWithResponse request with arbitrary body returning *V1SandboxCatalogDeleteResponse +func (c *ClientWithResponses) V1SandboxCatalogDeleteWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1SandboxCatalogDeleteResponse, error) { + rsp, err := c.V1SandboxCatalogDeleteWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1SandboxCatalogDeleteResponse(rsp) +} + +func (c *ClientWithResponses) V1SandboxCatalogDeleteWithResponse(ctx context.Context, body V1SandboxCatalogDeleteJSONRequestBody, reqEditors ...RequestEditorFn) (*V1SandboxCatalogDeleteResponse, error) { + rsp, err := c.V1SandboxCatalogDelete(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1SandboxCatalogDeleteResponse(rsp) +} + +// V1SandboxCatalogCreateWithBodyWithResponse request with arbitrary body returning *V1SandboxCatalogCreateResponse +func (c *ClientWithResponses) V1SandboxCatalogCreateWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*V1SandboxCatalogCreateResponse, error) { + rsp, err := c.V1SandboxCatalogCreateWithBody(ctx, contentType, body, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseV1SandboxCatalogCreateResponse(rsp) +} + +func (c *ClientWithResponses) V1SandboxCatalogCreateWithResponse(ctx context.Context, body V1SandboxCatalogCreateJSONRequestBody, reqEditors ...RequestEditorFn) (*V1SandboxCatalogCreateResponse, error) { + rsp, err := c.V1SandboxCatalogCreate(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1SandboxCatalogCreateResponse(rsp) +} + +// V1ServiceDiscoveryNodesWithResponse request returning *V1ServiceDiscoveryNodesResponse +func (c *ClientWithResponses) V1ServiceDiscoveryNodesWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodesResponse, error) { + rsp, err := c.V1ServiceDiscoveryNodes(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1ServiceDiscoveryNodesResponse(rsp) +} + +// V1ServiceDiscoveryGetOrchestratorsWithResponse request returning *V1ServiceDiscoveryGetOrchestratorsResponse +func (c *ClientWithResponses) V1ServiceDiscoveryGetOrchestratorsWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryGetOrchestratorsResponse, error) { + rsp, err := c.V1ServiceDiscoveryGetOrchestrators(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1ServiceDiscoveryGetOrchestratorsResponse(rsp) +} + +// V1ServiceDiscoveryNodeDrainWithResponse request returning *V1ServiceDiscoveryNodeDrainResponse +func (c *ClientWithResponses) V1ServiceDiscoveryNodeDrainWithResponse(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodeDrainResponse, error) { + rsp, err := c.V1ServiceDiscoveryNodeDrain(ctx, nodeID, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseV1ServiceDiscoveryNodeDrainResponse(rsp) +} + +// V1ServiceDiscoveryNodeKillWithResponse request returning *V1ServiceDiscoveryNodeKillResponse +func (c *ClientWithResponses) V1ServiceDiscoveryNodeKillWithResponse(ctx context.Context, nodeID string, reqEditors ...RequestEditorFn) (*V1ServiceDiscoveryNodeKillResponse, error) { + rsp, err := c.V1ServiceDiscoveryNodeKill(ctx, nodeID, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1ServiceDiscoveryNodeKillResponse(rsp) +} + +// V1TemplateBuildLogsWithResponse request returning *V1TemplateBuildLogsResponse +func (c *ClientWithResponses) V1TemplateBuildLogsWithResponse(ctx context.Context, buildID string, params *V1TemplateBuildLogsParams, reqEditors ...RequestEditorFn) (*V1TemplateBuildLogsResponse, error) { + rsp, err := c.V1TemplateBuildLogs(ctx, buildID, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseV1TemplateBuildLogsResponse(rsp) +} + +// ParseHealthCheckResponse parses an HTTP response from a HealthCheckWithResponse call +func ParseHealthCheckResponse(rsp *http.Response) (*HealthCheckResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &HealthCheckResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseHealthCheckMachineResponse parses an HTTP response from a HealthCheckMachineWithResponse call +func ParseHealthCheckMachineResponse(rsp *http.Response) (*HealthCheckMachineResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &HealthCheckMachineResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseHealthCheckTrafficResponse parses an HTTP response from a HealthCheckTrafficWithResponse call +func ParseHealthCheckTrafficResponse(rsp 
*http.Response) (*HealthCheckTrafficResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &HealthCheckTrafficResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + return response, nil +} + +// ParseV1InfoResponse parses an HTTP response from a V1InfoWithResponse call +func ParseV1InfoResponse(rsp *http.Response) (*V1InfoResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1InfoResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest ClusterNodeInfo + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1SandboxCatalogDeleteResponse parses an HTTP response from a V1SandboxCatalogDeleteWithResponse call +func ParseV1SandboxCatalogDeleteResponse(rsp *http.Response) (*V1SandboxCatalogDeleteResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1SandboxCatalogDeleteResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest N400 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + 
response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1SandboxCatalogCreateResponse parses an HTTP response from a V1SandboxCatalogCreateWithResponse call +func ParseV1SandboxCatalogCreateResponse(rsp *http.Response) (*V1SandboxCatalogCreateResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1SandboxCatalogCreateResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest N400 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1ServiceDiscoveryNodesResponse parses an HTTP response from a V1ServiceDiscoveryNodesWithResponse call +func ParseV1ServiceDiscoveryNodesResponse(rsp *http.Response) (*V1ServiceDiscoveryNodesResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := 
&V1ServiceDiscoveryNodesResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []ClusterNode + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1ServiceDiscoveryGetOrchestratorsResponse parses an HTTP response from a V1ServiceDiscoveryGetOrchestratorsWithResponse call +func ParseV1ServiceDiscoveryGetOrchestratorsResponse(rsp *http.Response) (*V1ServiceDiscoveryGetOrchestratorsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1ServiceDiscoveryGetOrchestratorsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []ClusterOrchestratorNode + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// 
ParseV1ServiceDiscoveryNodeDrainResponse parses an HTTP response from a V1ServiceDiscoveryNodeDrainWithResponse call +func ParseV1ServiceDiscoveryNodeDrainResponse(rsp *http.Response) (*V1ServiceDiscoveryNodeDrainResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1ServiceDiscoveryNodeDrainResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest N400 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest N404 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1ServiceDiscoveryNodeKillResponse parses an HTTP response from a V1ServiceDiscoveryNodeKillWithResponse call +func ParseV1ServiceDiscoveryNodeKillResponse(rsp *http.Response) (*V1ServiceDiscoveryNodeKillResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1ServiceDiscoveryNodeKillResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest N400 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + 
return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 404: + var dest N404 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON404 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} + +// ParseV1TemplateBuildLogsResponse parses an HTTP response from a V1TemplateBuildLogsWithResponse call +func ParseV1TemplateBuildLogsResponse(rsp *http.Response) (*V1TemplateBuildLogsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &V1TemplateBuildLogsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest TemplateBuildLogsResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest N400 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 401: + var dest N401 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON401 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest N500 + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + 
return nil, err + } + response.JSON500 = &dest + + } + + return response, nil +} diff --git a/packages/shared/pkg/http/edge/spec.gen.go b/packages/shared/pkg/http/edge/spec.gen.go new file mode 100644 index 0000000..da86108 --- /dev/null +++ b/packages/shared/pkg/http/edge/spec.gen.go @@ -0,0 +1,122 @@ +// Package api provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. +package api + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "fmt" + "net/url" + "path" + "strings" + + "github.com/getkin/kin-openapi/openapi3" +) + +// Base64 encoded, gzipped, json marshaled Swagger object +var swaggerSpec = []string{ + + "H4sIAAAAAAAC/+xZW1Pbuhb+Kxqd82hIaDnnIW8QOm2mhXaAMmeGyYOwV2IVW3J1Sckw/u9ndPHdSUwJ", + "tN17P2Fsad2+tT4trTzikKcZZ8CUxJNHLEBmnEmw/xyPx+ZPyJkCpswjybKEhkRRzkbfJGfmnQxjSIl5", + "+reABZ7gf40qmSP3VY7eCcEFzvM8wBHIUNDMCMETfEoiJOC7BqlwHuDj8dHL6zzRKgamvFQEbp1Rfvzy", + "yi+4QguuWWQ0/uc1QnwFYgWicDMPvDyL8TTRUoG44BGYfzPBMxCKugRgPILZmXlquxABmp3hAKt1BniC", + "pRKULY1DEsSKhvCBS7VhXyboiihAMZcKkSgSICUiLEJ+K8q4UFtEz5hUhIW9hl15EdSv2W7klSJCQXTS", + "Y+k1TQH9iIEhFQMycUA/iEQCltSECyIc4AUXKVF4giOi4EDRFLbrUlrugrCGht9QSbi2kgfvt8ur3Tcg", + "JHXZ1PTUf0B8YV31y7d44jdMeZrSnshdcS1CQKEJ2Wqg7DzAhgKoievktki7PsQ7/mwwqwfjZiDbwDQz", + "d54H9cqYsQXfZ3UMSeHdmauzQXkrXQReP2FfLeVufpM001knrVqJVAW1BboyjOjtD916ix4OMDCdGnNj", + "IImK1zjAkSCUGY8CrFnxet4TvjYddPROa6qQcnVR6OMijEEqQRQX5nW0hG1KPteW958mKShBwzMq789P", + "v0qDQduck5RrpkwcIirvkcyIYRItBDCVrJGWECHK0PlpPZkpU/89rsCmTMES7HHuFF6SdLe+y5PzZyu6", + "Iiy64w8gLzWz8LyOfzfTTPe7d6HTOxBG3Wr65ausaaLMKBum5SdoTvDEQU4VpEM5pJ5Alzyxh5eXTIQg", + "63+6i19C1oXTFRP+bGOwS9DrdgE2h7pdgEvdTm11ySTo8tlGIpj306TN8i4nk4zc0YSqdXEgtJm4n6AV", + "pFlicuVO0yQC0UvW7o7QoebQE3bTErvYdnItpnj7ZgMfSUmWmwTtBNwrKqSYoPlITgUQBVOiSMKXl/6y", + "2HECHiDURuPMcmHH93q8Zj10WYfGVKcA1044GxCVKEtIaHHuloBbtEGz/3pOHj4BW6q4q/ucPNBUpyjS", + 
"wt1JKUMx10IO42ivwKa5IZddTGDWSEXSrANCK0hBI6p1P3u86rGjykF+9w1Ce8X3oJ5BAs8HdVvgW65t", + "8qTPxmtfTKemlj7xpbz0g5GuhQlf+o5uQXSi8OR23plxGCnILgyqM7HjS/O0a1lvd5uaqLCbPA46JSxD", + "h1pQtb4y+DuzTzL6EdYn2mUjNWbGQAxvBJgRk0L4fwcnX2YHH2FdySR2lxsyUH8rUlQZHsPv3pyid6ZL", + "DPCqOE3w+PDocGyrLwNGMoon+O3h+HCMA5wRFVtTRq6HNY9L6Dk5PtjPKIwhvMdWkisSA7r/OPXfGvOr", + "N2640hTmM80euFKHIUi50InxKA8KS0YpCWPKYJBFaMEF8hvMZcudIxuNPPei92arEmSxoOFwW/0GlAn+", + "sN5m6rWX/HxTV0ejIlt6bTRpU3ZoZqXJanewN627ObJ38X6L9jJFa1/7++ZppX/JGglQWjCIXBtXt72a", + "ZvbpKx0YmUXVKHD7WrOoiqksGo1R6GjURdbwqnlqh644Td1ax7/YsQxIdcqj9d6iuI3j8ya1KaEhH5Ji", + "jbA7L6PGCe2DgIApsXbhHw8J//gFoaq4F09um6x7O8/nAZY6TYlY4wl2wUJkgz8BVsScNLe4hN2eB5m/", + "DG1H27VQL4t2b5u2F7RDK/kvhbYL1hPRLureMeVBRGXIVyDWI0M+ciO9vgdVn4yhct8OrvVXtrNi+YXV", + "8kzyLfsfkiSfFzZOA/kY5/NulzSQnUmSNAZr8ldmQQltG8edEI/q/Xkd8F3QvQf1ubF1Xyg+cbLjcHwO", + "is0I/Ba1XNQXb0X4GTg/uvFHPrLDXnvr6B16nRNx71oPIlFtMDyklM+saNOIC5KCApNQt5vHfPaOYJr2", + "6oZQzmiaBB/UEqV9GZk/mfztDxiIL5ybpY8vyPj+d+Bda49/I14o8+WeJsnmdPlIk6QcOg5ME7PnT8sS", + "G4W/Z4YUc0A5soNAOXq0f01uFKOKDWdGZ+jRRb0HXS/9SfAGXtJ3DbbX8aIaw6e9SCyC8XRpzVSbsQge", + "yh8YTapRtkR3xVwHqZgoJGOukwjdQXVg/aAqtnsKQ4oKaXu+WEiw8+jSpnKeNO4ZvqaU0VSn9mN7HLip", + "ePbS6m8ejA0+xOvhqEIo/7wW/rrHiapEyzo0pZmX7x8LyLslbNKu+Fg2/fk8/38AAAD//74mLAulJQAA", +} + +// GetSwagger returns the content of the embedded swagger specification file +// or error if failed to decode +func decodeSpec() ([]byte, error) { + zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, "")) + if err != nil { + return nil, fmt.Errorf("error base64 decoding spec: %w", err) + } + zr, err := gzip.NewReader(bytes.NewReader(zipped)) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + var buf bytes.Buffer + _, err = buf.ReadFrom(zr) + if err != nil { + return nil, fmt.Errorf("error decompressing spec: %w", err) + } + + return buf.Bytes(), nil +} + +var rawSpec = decodeSpecCached() + +// a naive cached of a decoded swagger spec +func 
decodeSpecCached() func() ([]byte, error) { + data, err := decodeSpec() + return func() ([]byte, error) { + return data, err + } +} + +// Constructs a synthetic filesystem for resolving external references when loading openapi specifications. +func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) { + res := make(map[string]func() ([]byte, error)) + if len(pathToFile) > 0 { + res[pathToFile] = rawSpec + } + + return res +} + +// GetSwagger returns the Swagger specification corresponding to the generated code +// in this file. The external references of Swagger specification are resolved. +// The logic of resolving external references is tightly connected to "import-mapping" feature. +// Externally referenced files must be embedded in the corresponding golang packages. +// Urls can be supported but this task was out of the scope. +func GetSwagger() (swagger *openapi3.T, err error) { + resolvePath := PathToRawSpec("") + + loader := openapi3.NewLoader() + loader.IsExternalRefsAllowed = true + loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) { + pathToFile := url.String() + pathToFile = path.Clean(pathToFile) + getSpec, ok := resolvePath[pathToFile] + if !ok { + err1 := fmt.Errorf("path not found: %s", pathToFile) + return nil, err1 + } + return getSpec() + } + var specData []byte + specData, err = rawSpec() + if err != nil { + return + } + swagger, err = loader.LoadFromData(specData) + if err != nil { + return + } + return +} diff --git a/packages/shared/pkg/http/edge/types.gen.go b/packages/shared/pkg/http/edge/types.gen.go new file mode 100644 index 0000000..1ffb2c0 --- /dev/null +++ b/packages/shared/pkg/http/edge/types.gen.go @@ -0,0 +1,189 @@ +// Package api provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.4.1 DO NOT EDIT. 
+package api + +import ( + "time" +) + +const ( + ApiKeyAuthScopes = "ApiKeyAuth.Scopes" +) + +// Defines values for ClusterNodeStatus. +const ( + Draining ClusterNodeStatus = "draining" + Healthy ClusterNodeStatus = "healthy" + Unhealthy ClusterNodeStatus = "unhealthy" +) + +// Defines values for ClusterNodeType. +const ( + ClusterNodeTypeEdge ClusterNodeType = "edge" + ClusterNodeTypeOrchestrator ClusterNodeType = "orchestrator" +) + +// Defines values for ClusterOrchestratorRole. +const ( + ClusterOrchestratorRoleOrchestrator ClusterOrchestratorRole = "orchestrator" + ClusterOrchestratorRoleTemplateBuilder ClusterOrchestratorRole = "template-builder" +) + +// ClusterNode defines model for ClusterNode. +type ClusterNode struct { + // NodeID Node ID + NodeID string `json:"nodeID"` + + // ServiceHost Node private host address and service port + ServiceHost string `json:"serviceHost"` + + // ServiceInstanceID Service instance ID + ServiceInstanceID string `json:"serviceInstanceID"` + + // ServiceStartedAt Time when the node was registered + ServiceStartedAt time.Time `json:"serviceStartedAt"` + + // ServiceStatus State of the cluster node + ServiceStatus ClusterNodeStatus `json:"serviceStatus"` + + // ServiceType Cluster node type + ServiceType ClusterNodeType `json:"serviceType"` + + // ServiceVersion Version of the service + ServiceVersion string `json:"serviceVersion"` + + // ServiceVersionCommit Source code version of the service + ServiceVersionCommit string `json:"serviceVersionCommit"` +} + +// ClusterNodeInfo defines model for ClusterNodeInfo. 
+type ClusterNodeInfo struct { + // NodeID Node ID + NodeID string `json:"nodeID"` + + // ServiceInstanceID Service ID + ServiceInstanceID string `json:"serviceInstanceID"` + + // ServiceStartup Time when the node started + ServiceStartup time.Time `json:"serviceStartup"` + + // ServiceStatus State of the cluster node + ServiceStatus ClusterNodeStatus `json:"serviceStatus"` + + // ServiceVersion Version of the service + ServiceVersion string `json:"serviceVersion"` + + // ServiceVersionCommit Version of the service + ServiceVersionCommit string `json:"serviceVersionCommit"` +} + +// ClusterNodeStatus State of the cluster node +type ClusterNodeStatus string + +// ClusterNodeType Cluster node type +type ClusterNodeType string + +// ClusterOrchestratorNode defines model for ClusterOrchestratorNode. +type ClusterOrchestratorNode struct { + // MetricDiskMBUsed Amount of disk space currently used in MB + MetricDiskMBUsed int64 `json:"metricDiskMBUsed"` + + // MetricRamMBUsed Amount of RAM currently used in MB + MetricRamMBUsed int64 `json:"metricRamMBUsed"` + + // MetricSandboxesRunning Amount of disk space currently used in MB + MetricSandboxesRunning int64 `json:"metricSandboxesRunning"` + + // MetricVCpuUsed Number of vCPUs currently in use + MetricVCpuUsed int64 `json:"metricVCpuUsed"` + + // NodeID Node ID + NodeID string `json:"nodeID"` + Roles []ClusterOrchestratorRole `json:"roles"` + + // ServiceHost Node private host address and service port + ServiceHost string `json:"serviceHost"` + + // ServiceInstanceID Service instance ID + ServiceInstanceID string `json:"serviceInstanceID"` + + // ServiceStartedAt Time when the node was registered + ServiceStartedAt time.Time `json:"serviceStartedAt"` + + // ServiceStatus State of the cluster node + ServiceStatus ClusterNodeStatus `json:"serviceStatus"` + + // ServiceVersion Service Version + ServiceVersion string `json:"serviceVersion"` + + // ServiceVersionCommit Service Version + ServiceVersionCommit string 
`json:"serviceVersionCommit"` +} + +// ClusterOrchestratorRole Capability of the orchestrator +type ClusterOrchestratorRole string + +// Error defines model for Error. +type Error struct { + // Code Error code + Code int32 `json:"code"` + + // Message Error + Message string `json:"message"` +} + +// SandboxCreateCatalogRequest defines model for SandboxCreateCatalogRequest. +type SandboxCreateCatalogRequest struct { + ExecutionId string `json:"executionId"` + + // OrchestratorId Orchestrator where the sandbox is placed + OrchestratorId string `json:"orchestratorId"` + SandboxId string `json:"sandboxId"` + + // SandboxMaxLength Maximum duration in hours + SandboxMaxLength int64 `json:"sandboxMaxLength"` + SandboxStartTime Timestamp `json:"sandboxStartTime"` +} + +// SandboxDeleteCatalogRequest defines model for SandboxDeleteCatalogRequest. +type SandboxDeleteCatalogRequest struct { + ExecutionId string `json:"executionId"` + SandboxId string `json:"sandboxId"` +} + +// TemplateBuildLogsResponse defines model for TemplateBuildLogsResponse. +type TemplateBuildLogsResponse struct { + // Logs Build logs + Logs []string `json:"logs"` +} + +// Timestamp defines model for Timestamp. +type Timestamp = time.Time + +// N400 defines model for 400. +type N400 = Error + +// N401 defines model for 401. +type N401 = Error + +// N404 defines model for 404. +type N404 = Error + +// N500 defines model for 500. +type N500 = Error + +// V1TemplateBuildLogsParams defines parameters for V1TemplateBuildLogs. +type V1TemplateBuildLogsParams struct { + OrchestratorID string `form:"orchestratorID" json:"orchestratorID"` + TemplateID string `form:"templateID" json:"templateID"` + + // Offset Index of the starting build log that should be returned with the template + Offset *int32 `form:"offset,omitempty" json:"offset,omitempty"` +} + +// V1SandboxCatalogDeleteJSONRequestBody defines body for V1SandboxCatalogDelete for application/json ContentType. 
+type V1SandboxCatalogDeleteJSONRequestBody = SandboxDeleteCatalogRequest + +// V1SandboxCatalogCreateJSONRequestBody defines body for V1SandboxCatalogCreate for application/json ContentType. +type V1SandboxCatalogCreateJSONRequestBody = SandboxCreateCatalogRequest diff --git a/packages/shared/pkg/keys/constants.go b/packages/shared/pkg/keys/constants.go new file mode 100644 index 0000000..ec0be28 --- /dev/null +++ b/packages/shared/pkg/keys/constants.go @@ -0,0 +1,6 @@ +package keys + +const ( + ApiKeyPrefix = "e2b_" + AccessTokenPrefix = "sk_e2b_" +) diff --git a/packages/shared/pkg/keys/hashing.go b/packages/shared/pkg/keys/hashing.go new file mode 100644 index 0000000..a5572f2 --- /dev/null +++ b/packages/shared/pkg/keys/hashing.go @@ -0,0 +1,5 @@ +package keys + +type Hasher interface { + Hash(key []byte) string +} diff --git a/packages/shared/pkg/keys/hmac_sha256.go b/packages/shared/pkg/keys/hmac_sha256.go new file mode 100644 index 0000000..b0957a4 --- /dev/null +++ b/packages/shared/pkg/keys/hmac_sha256.go @@ -0,0 +1,25 @@ +package keys + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" +) + +type HMACSha256Hashing struct { + key []byte +} + +func NewHMACSHA256Hashing(key []byte) *HMACSha256Hashing { + return &HMACSha256Hashing{key: key} +} + +func (h *HMACSha256Hashing) Hash(content []byte) (string, error) { + mac := hmac.New(sha256.New, h.key) + _, err := mac.Write(content) + if err != nil { + return "", err + } + + return hex.EncodeToString(mac.Sum(nil)), nil +} diff --git a/packages/shared/pkg/keys/hmac_sha256_test.go b/packages/shared/pkg/keys/hmac_sha256_test.go new file mode 100644 index 0000000..5d47e10 --- /dev/null +++ b/packages/shared/pkg/keys/hmac_sha256_test.go @@ -0,0 +1,70 @@ +package keys + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHMACSha256Hashing_ValidHash(t *testing.T) { + key := []byte("test-key") + hasher := NewHMACSHA256Hashing(key) + 
content := []byte("hello world") + expectedHash := "18c4b268f0bbf8471eda56af3e70b1d4613d734dc538b4940b59931c412a1591" + actualHash, err := hasher.Hash(content) + require.Nil(t, err) + + if actualHash != expectedHash { + t.Errorf("expected %s, got %s", expectedHash, actualHash) + } +} + +func TestHMACSha256Hashing_EmptyContent(t *testing.T) { + key := []byte("test-key") + hasher := NewHMACSHA256Hashing(key) + content := []byte("") + expectedHash := "2711cc23e9ab1b8a9bc0fe991238da92671624a9ebdaf1c1abec06e7e9a14f9b" + actualHash, err := hasher.Hash(content) + require.Nil(t, err) + + if actualHash != expectedHash { + t.Errorf("expected %s, got %s", expectedHash, actualHash) + } +} + +func TestHMACSha256Hashing_DifferentKey(t *testing.T) { + key := []byte("test-key") + hasher := NewHMACSHA256Hashing(key) + differentKeyHasher := NewHMACSHA256Hashing([]byte("different-key")) + content := []byte("hello world") + + hashWithOriginalKey, err := hasher.Hash(content) + require.Nil(t, err) + + hashWithDifferentKey, err := differentKeyHasher.Hash(content) + require.Nil(t, err) + + if hashWithOriginalKey == hashWithDifferentKey { + t.Errorf("hashes with different keys should not match") + } +} + +func TestHMACSha256Hashing_IdenticalResult(t *testing.T) { + key := []byte("placeholder-hashing-key") + content := []byte("test content for hashing") + + mac := hmac.New(sha256.New, key) + mac.Write(content) + expectedResult := hex.EncodeToString(mac.Sum(nil)) + + hasher := NewHMACSHA256Hashing(key) + actualResult, err := hasher.Hash(content) + require.Nil(t, err) + + if actualResult != expectedResult { + t.Errorf("expected %s, got %s", expectedResult, actualResult) + } +} diff --git a/packages/shared/pkg/keys/key.go b/packages/shared/pkg/keys/key.go new file mode 100644 index 0000000..45fa6b7 --- /dev/null +++ b/packages/shared/pkg/keys/key.go @@ -0,0 +1,84 @@ +package keys + +import ( + "crypto/rand" + "encoding/hex" + "fmt" +) + +const ( + identifierValueSuffixLength = 4 + 
identifierValuePrefixLength = 2 + + keyLength = 20 +) + +var hasher Hasher = NewSHA256Hashing() + +type Key struct { + PrefixedRawValue string + HashedValue string + Masked MaskedIdentifier +} + +type MaskedIdentifier struct { + Prefix string + ValueLength int + MaskedValuePrefix string + MaskedValueSuffix string +} + +// MaskKey returns identifier masking properties in accordance to the OpenAPI response spec +func MaskKey(prefix, value string) (MaskedIdentifier, error) { + valueLength := len(value) + + suffixOffset := valueLength - identifierValueSuffixLength + prefixOffset := identifierValuePrefixLength + + if suffixOffset < 0 { + return MaskedIdentifier{}, fmt.Errorf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength) + } + + if suffixOffset == 0 { + return MaskedIdentifier{}, fmt.Errorf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength) + } + + // cap prefixOffset by suffixOffset to prevent overlap with the suffix. 
+ if prefixOffset > suffixOffset { + prefixOffset = suffixOffset + } + + maskPrefix := value[:prefixOffset] + maskSuffix := value[suffixOffset:] + + maskedIdentifierProperties := MaskedIdentifier{ + Prefix: prefix, + ValueLength: valueLength, + MaskedValuePrefix: maskPrefix, + MaskedValueSuffix: maskSuffix, + } + + return maskedIdentifierProperties, nil +} + +func GenerateKey(prefix string) (Key, error) { + keyBytes := make([]byte, keyLength) + + _, err := rand.Read(keyBytes) + if err != nil { + return Key{}, err + } + + generatedIdentifier := hex.EncodeToString(keyBytes) + + mask, err := MaskKey(prefix, generatedIdentifier) + if err != nil { + return Key{}, err + } + + return Key{ + PrefixedRawValue: prefix + generatedIdentifier, + HashedValue: hasher.Hash(keyBytes), + Masked: mask, + }, nil +} diff --git a/packages/shared/pkg/keys/key_test.go b/packages/shared/pkg/keys/key_test.go new file mode 100644 index 0000000..c567b00 --- /dev/null +++ b/packages/shared/pkg/keys/key_test.go @@ -0,0 +1,150 @@ +package keys + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaskKey(t *testing.T) { + t.Run("succeeds: value longer than suffix length", func(t *testing.T) { + masked, err := MaskKey("test_", "1234567890") + assert.NoError(t, err) + assert.Equal(t, "test_", masked.Prefix) + assert.Equal(t, "12", masked.MaskedValuePrefix) + assert.Equal(t, "7890", masked.MaskedValueSuffix) + }) + + t.Run("succeeds: empty prefix, value longer than suffix length", func(t *testing.T) { + masked, err := MaskKey("", "1234567890") + assert.NoError(t, err) + assert.Equal(t, "", masked.Prefix) + assert.Equal(t, "12", masked.MaskedValuePrefix) + assert.Equal(t, "7890", masked.MaskedValueSuffix) + }) + + t.Run("error: value length less than suffix length", func(t *testing.T) { + _, err := MaskKey("test", "123") + assert.Error(t, err) + assert.EqualError(t, err, fmt.Sprintf("mask value length is less than identifier suffix length (%d)", 
identifierValueSuffixLength)) + }) + + t.Run("error: value length equals suffix length", func(t *testing.T) { + _, err := MaskKey("test", "1234") + assert.Error(t, err) + assert.EqualError(t, err, fmt.Sprintf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength)) + }) +} + +func TestGenerateKey(t *testing.T) { + keyLength := 40 + + t.Run("succeeds", func(t *testing.T) { + key, err := GenerateKey("test_") + assert.NoError(t, err) + assert.Regexp(t, "^test_.*", key.PrefixedRawValue) + assert.Equal(t, "test_", key.Masked.Prefix) + assert.Equal(t, keyLength, key.Masked.ValueLength) + assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValuePrefixLength)+"}$", key.Masked.MaskedValuePrefix) + assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValueSuffixLength)+"}$", key.Masked.MaskedValueSuffix) + assert.Regexp(t, "^\\$sha256\\$.*", key.HashedValue) + }) + + t.Run("no prefix", func(t *testing.T) { + key, err := GenerateKey("") + assert.NoError(t, err) + assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(keyLength)+"}$", key.PrefixedRawValue) + assert.Equal(t, "", key.Masked.Prefix) + assert.Equal(t, keyLength, key.Masked.ValueLength) + assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValuePrefixLength)+"}$", key.Masked.MaskedValuePrefix) + assert.Regexp(t, "^[0-9a-f]{"+strconv.Itoa(identifierValueSuffixLength)+"}$", key.Masked.MaskedValueSuffix) + assert.Regexp(t, "^\\$sha256\\$.*", key.HashedValue) + }) +} + +func TestGetMaskedIdentifierProperties(t *testing.T) { + type testCase struct { + name string + prefix string + value string + expectedResult MaskedIdentifier + expectedErrString string + } + + testCases := []testCase{ + // --- ERROR CASES (value's length <= identifierValueSuffixLength) --- + { + name: "error: value length < suffix length (3 vs 4)", + prefix: "pk_", + value: "abc", + expectedResult: MaskedIdentifier{}, + expectedErrString: fmt.Sprintf("mask value length is 
less than identifier suffix length (%d)", identifierValueSuffixLength), + }, + { + name: "error: value length == suffix length (4 vs 4)", + prefix: "sk_", + value: "abcd", + expectedResult: MaskedIdentifier{}, + expectedErrString: fmt.Sprintf("mask value length is equal to identifier suffix length (%d), which would expose the entire identifier in the mask", identifierValueSuffixLength), + }, + { + name: "error: value length < suffix length (0 vs 4, empty value)", + prefix: "err_", + value: "", + expectedResult: MaskedIdentifier{}, + expectedErrString: fmt.Sprintf("mask value length is less than identifier suffix length (%d)", identifierValueSuffixLength), + }, + + // --- SUCCESS CASES (value's length > identifierValueSuffixLength) --- + { + name: "success: value long (10), prefix val len fully used", + prefix: "pk_", + value: "abcdefghij", + expectedResult: MaskedIdentifier{ + Prefix: "pk_", + ValueLength: 10, + MaskedValuePrefix: "ab", + MaskedValueSuffix: "ghij", + }, + }, + { + name: "success: value medium (5), prefix val len truncated by overlap", + prefix: "", + value: "abcde", + expectedResult: MaskedIdentifier{ + Prefix: "", + ValueLength: 5, + MaskedValuePrefix: "a", + MaskedValueSuffix: "bcde", + }, + }, + { + name: "success: value medium (6), prefix val len fits exactly", + prefix: "pk_", + value: "abcdef", + expectedResult: MaskedIdentifier{ + Prefix: "pk_", + ValueLength: 6, + MaskedValuePrefix: "ab", + MaskedValueSuffix: "cdef", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := MaskKey(tc.prefix, tc.value) + + if tc.expectedErrString != "" { + assert.Error(t, err) + assert.EqualError(t, err, tc.expectedErrString) + assert.Equal(t, tc.expectedResult, result) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedResult, result) + } + }) + } +} diff --git a/packages/shared/pkg/keys/sha256.go b/packages/shared/pkg/keys/sha256.go new file mode 100644 index 0000000..b66b593 --- /dev/null +++ 
b/packages/shared/pkg/keys/sha256.go @@ -0,0 +1,29 @@ +package keys + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" +) + +type Sha256Hashing struct{} + +func NewSHA256Hashing() *Sha256Hashing { + return &Sha256Hashing{} +} + +func (h *Sha256Hashing) Hash(key []byte) string { + hashBytes := sha256.Sum256(key) + + hash64 := base64.RawStdEncoding.EncodeToString(hashBytes[:]) + + return fmt.Sprintf( + "$sha256$%s", + hash64, + ) +} + +func (h *Sha256Hashing) HashWithoutPrefix(key []byte) string { + hashBytes := sha256.Sum256(key) + return base64.RawStdEncoding.EncodeToString(hashBytes[:]) +} diff --git a/packages/shared/pkg/keys/sha256_test.go b/packages/shared/pkg/keys/sha256_test.go new file mode 100644 index 0000000..639ab07 --- /dev/null +++ b/packages/shared/pkg/keys/sha256_test.go @@ -0,0 +1,14 @@ +package keys + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSHA256Hashing(t *testing.T) { + hasher := NewSHA256Hashing() + + hashed := hasher.Hash([]byte("test")) + assert.Regexp(t, "^\\$sha256\\$.*", hashed) +} diff --git a/packages/shared/pkg/logger/exporter.go b/packages/shared/pkg/logger/exporter.go new file mode 100644 index 0000000..ef0d8eb --- /dev/null +++ b/packages/shared/pkg/logger/exporter.go @@ -0,0 +1,95 @@ +package logger + +import ( + "bytes" + "context" + "fmt" + "log" + "net/http" + "sync" + "time" + + "go.uber.org/zap/zapcore" +) + +type HTTPWriter struct { + ctx context.Context + url string + httpClient *http.Client + wg sync.WaitGroup +} + +func NewHTTPWriter(ctx context.Context, endpoint string) zapcore.WriteSyncer { + return &HTTPWriter{ + ctx: ctx, + url: endpoint, + httpClient: &http.Client{ + Timeout: 10 * time.Second, + }, + wg: sync.WaitGroup{}, + } +} + +func (h *HTTPWriter) Write(source []byte) (n int, err error) { + h.wg.Add(1) + + // zap is reusing the buffer, so since we're processing it in a Go routine, we need to make a copy. 
+ p := make([]byte, len(source)) + copy(p, source) + + // Run in a goroutine to avoid blocking the main thread + go func() { + defer h.wg.Done() + + start := 0 + for i, b := range p { + if b == '\n' { + if start < i { // Ignore empty lines + line := p[start:i] + if err := h.sendLogLine(line); err != nil { + log.Printf("Failed to send a log line: %s\n", line) + return + } + } + start = i + 1 // Move start to the next line + } + } + + // Handle the last line if there’s no trailing newline + if start < len(p) { + line := p[start:] + if err := h.sendLogLine(line); err != nil { + log.Printf("Failed to send a log line: %s\n", line) + return + } + } + }() + + return len(p), nil +} + +func (h *HTTPWriter) Sync() error { + h.wg.Wait() + return nil +} + +// sendLogLine handles sending ONE log line as an HTTP request +func (h *HTTPWriter) sendLogLine(line []byte) error { + request, err := http.NewRequestWithContext(h.ctx, http.MethodPost, h.url, bytes.NewReader(line)) + if err != nil { + return fmt.Errorf("error sending logs: %w", err) + } + + request.Header.Set("Content-Type", "application/json") + + response, err := h.httpClient.Do(request) + if err != nil { + return fmt.Errorf("error sending logs: %w", err) + } + + err = response.Body.Close() + if err != nil { + return fmt.Errorf("error closing response body: %w", err) + } + return nil +} diff --git a/packages/shared/pkg/logger/fields.go b/packages/shared/pkg/logger/fields.go new file mode 100644 index 0000000..c9de995 --- /dev/null +++ b/packages/shared/pkg/logger/fields.go @@ -0,0 +1,30 @@ +package logger + +import ( + "github.com/google/uuid" + "go.uber.org/zap" +) + +func WithSandboxID(sandboxID string) zap.Field { + return zap.String("sandbox.id", sandboxID) +} + +func WithTemplateID(templateID string) zap.Field { + return zap.String("template.id", templateID) +} + +func WithBuildID(buildID string) zap.Field { + return zap.String("build.id", buildID) +} + +func WithTeamID(teamID string) zap.Field { + return 
zap.String("team.id", teamID) +} + +func WithClusterID(clusterId uuid.UUID) zap.Field { + return zap.String("cluster.id", clusterId.String()) +} + +func WithClusterNodeID(nodeId string) zap.Field { + return zap.String("cluster.node.id", nodeId) +} diff --git a/packages/shared/pkg/logger/grpc.go b/packages/shared/pkg/logger/grpc.go new file mode 100644 index 0000000..9505099 --- /dev/null +++ b/packages/shared/pkg/logger/grpc.go @@ -0,0 +1,88 @@ +package logger + +import ( + "context" + "fmt" + + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/selector" + "go.uber.org/zap" +) + +const HealthCheckRoute = "/grpc.health.v1.Health/Check" + +func GRPCLogger(l *zap.Logger) logging.Logger { + return logging.LoggerFunc(func(ctx context.Context, lvl logging.Level, msg string, fields ...any) { + f := make([]zap.Field, 0, len(fields)/2) + + methodFullNameMap := map[string]string{ + "grpc.service": "...", + "grpc.method": "...", + "grpc.method_type": "...", + "grpc.code": "-", + } + + for i := 0; i < len(fields)-1; i += 2 { + key := fields[i] + value := fields[i+1] + + switch v := value.(type) { + case string: + f = append(f, zap.String(key.(string), v)) + + _, ok := methodFullNameMap[key.(string)] + if ok { + methodFullNameMap[key.(string)] = v + } + case int: + f = append(f, zap.Int(key.(string), v)) + case bool: + f = append(f, zap.Bool(key.(string), v)) + default: + f = append(f, zap.Any(key.(string), v)) + } + } + + logger := l.WithOptions(zap.AddCallerSkip(1)).With(f...) 
+ + methodFullName := fmt.Sprintf("%s/%s/%s", + methodFullNameMap["grpc.service"], + methodFullNameMap["grpc.method"], + methodFullNameMap["grpc.method_type"], + ) + if msg == "finished call" || msg == "finished streaming call" { + methodFullName = fmt.Sprintf("%s [%s]", methodFullName, methodFullNameMap["grpc.code"]) + } + + message := fmt.Sprintf("%s: %s", methodFullName, msg) + + switch lvl { + case logging.LevelDebug: + logger.Debug(message) + case logging.LevelInfo: + logger.Info(message) + case logging.LevelWarn: + logger.Warn(message) + case logging.LevelError: + logger.Error(message) + default: + panic(fmt.Sprintf("unknown level %v", lvl)) + } + }) +} + +func WithoutHealthCheck() selector.Matcher { + return WithoutRoutes(HealthCheckRoute) +} + +func WithoutRoutes(routes ...string) selector.Matcher { + return selector.MatchFunc(func(_ context.Context, c interceptors.CallMeta) bool { + for _, route := range routes { + if c.FullMethod() == route { + return false + } + } + return true + }) +} diff --git a/packages/shared/pkg/logger/logger.go b/packages/shared/pkg/logger/logger.go new file mode 100644 index 0000000..2558209 --- /dev/null +++ b/packages/shared/pkg/logger/logger.go @@ -0,0 +1,93 @@ +package logger + +import ( + "context" + "fmt" + "os" + + "go.opentelemetry.io/contrib/bridges/otelzap" + "go.opentelemetry.io/otel/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +type LoggerConfig struct { + // ServiceName is the name of the service that the logger is being created for. + // The service name is added to every log entry. + ServiceName string + // IsInternal differentiates between our (internal) logs, and user accessible (external) logs. + IsInternal bool + // IsDebug enables debug level logging, otherwise zap.InfoLevel level is used. + IsDebug bool + // DisableStacktrace disables stacktraces for the logger. + DisableStacktrace bool + + // InitialFields fields that are added to every log entry. 
+ InitialFields []zap.Field + // Cores additional processing cores for the logger. + Cores []zapcore.Core + // EnableConsole enables console logging. + EnableConsole bool +} + +func NewLogger(_ context.Context, loggerConfig LoggerConfig) (*zap.Logger, error) { + var level zap.AtomicLevel + if loggerConfig.IsDebug { + level = zap.NewAtomicLevelAt(zap.DebugLevel) + } else { + level = zap.NewAtomicLevelAt(zap.InfoLevel) + } + + // Console logging configuration + config := zap.Config{ + DisableStacktrace: loggerConfig.DisableStacktrace, + // Takes stacktraces more liberally + Development: true, + Sampling: nil, + + Encoding: "console", + EncoderConfig: GetConsoleEncoderConfig(), + Level: level, + OutputPaths: []string{}, + ErrorOutputPaths: []string{}, + } + if loggerConfig.EnableConsole { + config.OutputPaths = []string{"stdout"} + config.ErrorOutputPaths = []string{"stderr"} + } + + cores := make([]zapcore.Core, 0) + cores = append(cores, loggerConfig.Cores...) + + logger, err := config.Build( + zap.WrapCore(func(c zapcore.Core) zapcore.Core { + cores = append(cores, c) + + return zapcore.NewTee(cores...) 
+ }), + zap.Fields( + zap.String("service", loggerConfig.ServiceName), + zap.Bool("internal", loggerConfig.IsInternal), + zap.Int("pid", os.Getpid()), + ), + zap.Fields(loggerConfig.InitialFields...), + ) + if err != nil { + return nil, fmt.Errorf("error building logger: %w", err) + } + + return logger, nil +} + +func GetConsoleEncoderConfig() zapcore.EncoderConfig { + cfg := zap.NewDevelopmentEncoderConfig() + cfg.EncodeLevel = zapcore.CapitalColorLevelEncoder + cfg.CallerKey = zapcore.OmitKey + cfg.ConsoleSeparator = " " + + return cfg +} + +func GetOTELCore(provider log.LoggerProvider, serviceName string) zapcore.Core { + return otelzap.NewCore(serviceName, otelzap.WithLoggerProvider(provider)) +} diff --git a/packages/shared/pkg/logger/sandbox/global.go b/packages/shared/pkg/logger/sandbox/global.go new file mode 100644 index 0000000..301a5c5 --- /dev/null +++ b/packages/shared/pkg/logger/sandbox/global.go @@ -0,0 +1,24 @@ +package sbxlogger + +import "go.uber.org/zap" + +var ( + sandboxLoggerInternal *zap.Logger = zap.NewNop() + sandboxLoggerExternal *zap.Logger = zap.NewNop() +) + +func SetSandboxLoggerInternal(logger *zap.Logger) { + sandboxLoggerInternal = logger +} + +func SetSandboxLoggerExternal(logger *zap.Logger) { + sandboxLoggerExternal = logger +} + +func I(m LoggerMetadata) *SandboxLogger { + return &SandboxLogger{sandboxLoggerInternal.With(m.LoggerMetadata().Fields()...)} +} + +func E(m LoggerMetadata) *SandboxLogger { + return &SandboxLogger{sandboxLoggerExternal.With(m.LoggerMetadata().Fields()...)} +} diff --git a/packages/shared/pkg/logger/sandbox/logger.go b/packages/shared/pkg/logger/sandbox/logger.go new file mode 100644 index 0000000..af21d90 --- /dev/null +++ b/packages/shared/pkg/logger/sandbox/logger.go @@ -0,0 +1,71 @@ +package sbxlogger + +import ( + "context" + + "go.opentelemetry.io/otel/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type SandboxLoggerConfig 
struct { + // ServiceName is the name of the service that the logger is being created for. + // The service name is added to every log entry. + ServiceName string + // IsInternal differentiates between our (internal) logs, and user accessible (external) logs. + // For external logger, we also disable stacktraces + IsInternal bool + CollectorAddress string +} + +func NewLogger(ctx context.Context, loggerProvider log.LoggerProvider, config SandboxLoggerConfig) *zap.Logger { + level := zap.NewAtomicLevelAt(zap.DebugLevel) + + enableConsole := false + var core zapcore.Core + if !config.IsInternal && config.CollectorAddress != "" { + // Add Vector exporter to the core + vectorEncoder := zapcore.NewJSONEncoder(GetSandboxEncoderConfig()) + httpWriter := logger.NewHTTPWriter(ctx, config.CollectorAddress) + core = zapcore.NewCore( + vectorEncoder, + httpWriter, + level, + ) + } else { + core = logger.GetOTELCore(loggerProvider, config.ServiceName) + enableConsole = true + } + + lg, err := logger.NewLogger(ctx, logger.LoggerConfig{ + ServiceName: config.ServiceName, + IsInternal: config.IsInternal, + IsDebug: true, + DisableStacktrace: !config.IsInternal, + InitialFields: []zap.Field{ + zap.String("logger", config.ServiceName), + }, + Cores: []zapcore.Core{core}, + EnableConsole: enableConsole, + }) + if err != nil { + panic(err) + } + + return lg +} + +func GetSandboxEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "timestamp", + MessageKey: "message", + LevelKey: "level", + EncodeLevel: zapcore.LowercaseLevelEncoder, + NameKey: "logger", + StacktraceKey: "stacktrace", + EncodeTime: zapcore.RFC3339NanoTimeEncoder, + LineEnding: zapcore.DefaultLineEnding, + } +} diff --git a/packages/shared/pkg/logger/sandbox/metadata.go b/packages/shared/pkg/logger/sandbox/metadata.go new file mode 100644 index 0000000..1156573 --- /dev/null +++ b/packages/shared/pkg/logger/sandbox/metadata.go @@ -0,0 +1,33 @@ +package sbxlogger + +import ( + 
"go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type SandboxMetadata struct { + SandboxID string + TemplateID string + TeamID string +} + +type LoggerMetadata interface { + LoggerMetadata() SandboxMetadata +} + +func (sm SandboxMetadata) LoggerMetadata() SandboxMetadata { + return sm +} + +func (sm SandboxMetadata) Fields() []zap.Field { + return []zap.Field{ + logger.WithSandboxID(sm.SandboxID), + logger.WithTemplateID(sm.TemplateID), + logger.WithTeamID(sm.TeamID), + + // Fields for Vector + zap.String("instanceID", sm.SandboxID), + zap.String("envID", sm.TemplateID), + } +} diff --git a/packages/shared/pkg/logger/sandbox/sandbox_logger.go b/packages/shared/pkg/logger/sandbox/sandbox_logger.go new file mode 100644 index 0000000..e540d72 --- /dev/null +++ b/packages/shared/pkg/logger/sandbox/sandbox_logger.go @@ -0,0 +1,53 @@ +package sbxlogger + +import "go.uber.org/zap" + +type SandboxLogger struct { + *zap.Logger +} + +type HealthCheckAction int + +const ( + Success HealthCheckAction = iota + Fail + ReportSuccess + ReportFail +) + +type SandboxMetricsFields struct { + Timestamp int64 + CPUCount uint32 + CPUUsedPercent float32 + MemTotalMiB uint64 + MemUsedMiB uint64 +} + +func (sl *SandboxLogger) Metrics(metrics SandboxMetricsFields) { + sl.Info( + "", + zap.String("category", "metrics"), + zap.Float32("cpuUsedPct", metrics.CPUUsedPercent), + zap.Uint32("cpuCount", metrics.CPUCount), + zap.Uint64("memTotalMiB", metrics.MemTotalMiB), + zap.Uint64("memUsedMiB", metrics.MemUsedMiB), + ) +} + +func (sl *SandboxLogger) Healthcheck(action HealthCheckAction) { + switch action { + case Success: + sl.Info("Sandbox healthcheck recovered", + zap.Bool("healthcheck", true)) + case Fail: + sl.Error("Sandbox healthcheck started failing", + zap.Bool("healthcheck", false)) + case ReportSuccess: + sl.Info( + "Control sandbox healthcheck was successful", + zap.Bool("healthcheck", true)) + case ReportFail: + sl.Error("Control sandbox healthcheck 
was unsuccessful", + zap.Bool("healthcheck", false)) + } +} diff --git a/packages/shared/pkg/logging/exporter/exporter.go b/packages/shared/pkg/logging/exporter/exporter.go deleted file mode 100644 index af0a2be..0000000 --- a/packages/shared/pkg/logging/exporter/exporter.go +++ /dev/null @@ -1,111 +0,0 @@ -package exporter - -import ( - "bytes" - "fmt" - "log" - "net/http" - "sync" - "time" - - logsConf "github.com/e2b-dev/infra/packages/shared/pkg/logs" -) - -type HTTPLogsExporter struct { - client http.Client - triggers chan struct{} - logs [][]byte - sync.Mutex - debug bool -} - -func NewHTTPLogsExporter(debug bool) *HTTPLogsExporter { - exporter := &HTTPLogsExporter{ - client: http.Client{ - Timeout: 2 * time.Second, - }, - triggers: make(chan struct{}, 1), - debug: debug, - } - - go exporter.start() - - return exporter -} - -func (w *HTTPLogsExporter) sendInstanceLogs(logs []byte) error { - request, err := http.NewRequest("POST", logsConf.CollectorAddress, bytes.NewBuffer(logs)) - if err != nil { - return err - } - - request.Header.Set("Content-Type", "application/json") - - response, err := w.client.Do(request) - if err != nil { - return err - } - defer response.Body.Close() - - return nil -} - -func (w *HTTPLogsExporter) start() { - for range w.triggers { - logs := w.getAllLogs() - - if len(logs) == 0 { - continue - } - - for _, logEntry := range logs { - if w.debug { - fmt.Printf("%v\n", string(logEntry)) - - continue - } else { - err := w.sendInstanceLogs(logs[0]) - if err != nil { - log.Printf("error sending logs: %v", err) - } - } - } - } -} - -func (w *HTTPLogsExporter) resumeProcessing() { - select { - case w.triggers <- struct{}{}: - default: - // Exporter processing already triggered - // This is expected behavior if the exporter is already processing logs - } -} - -func (w *HTTPLogsExporter) Write(logs []byte) (int, error) { - logsCopy := make([]byte, len(logs)) - copy(logsCopy, logs) - - go w.addLogs(logsCopy) - - return len(logs), nil -} - -func 
(w *HTTPLogsExporter) getAllLogs() [][]byte { - w.Lock() - defer w.Unlock() - - logs := w.logs - w.logs = nil - - return logs -} - -func (w *HTTPLogsExporter) addLogs(logs []byte) { - w.Lock() - defer w.Unlock() - - w.logs = append(w.logs, logs) - - w.resumeProcessing() -} diff --git a/packages/shared/pkg/logging/grpc.go b/packages/shared/pkg/logging/grpc.go deleted file mode 100644 index 704bb30..0000000 --- a/packages/shared/pkg/logging/grpc.go +++ /dev/null @@ -1,15 +0,0 @@ -package logging - -import grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - -func WithoutHealthCheck() grpc_zap.Option { - return grpc_zap.WithDecider(func(fullMethodName string, err error) bool { - // will not log gRPC calls if it was a call to healthcheck and no error was raised - if err == nil && fullMethodName == "/grpc.health.v1.Health/Check" { - return false - } - - // by default everything will be logged - return true - }) -} diff --git a/packages/shared/pkg/logging/logger.go b/packages/shared/pkg/logging/logger.go deleted file mode 100644 index f91d380..0000000 --- a/packages/shared/pkg/logging/logger.go +++ /dev/null @@ -1,40 +0,0 @@ -package logging - -import ( - "fmt" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func New(isLocal bool) (*zap.SugaredLogger, error) { - config := zap.Config{ - Level: zap.NewAtomicLevelAt(zap.InfoLevel), - Development: isLocal, - DisableStacktrace: !isLocal, - Encoding: "console", - EncoderConfig: zapcore.EncoderConfig{ - TimeKey: "timestamp", - MessageKey: "message", - LevelKey: "level", - EncodeLevel: zapcore.LowercaseLevelEncoder, - NameKey: "logger", - StacktraceKey: "stacktrace", - EncodeTime: zapcore.RFC3339TimeEncoder, - }, - OutputPaths: []string{ - "stdout", - }, - ErrorOutputPaths: []string{ - "stderr", - }, - } - - logger, err := config.Build() - if err != nil { - return nil, fmt.Errorf("error building logger: %w", err) - } - - zap.ReplaceGlobals(logger) - return logger.Sugar(), nil -} diff --git 
a/packages/shared/pkg/logs/exporter/exporter.go b/packages/shared/pkg/logs/exporter/exporter.go deleted file mode 100644 index 4292d39..0000000 --- a/packages/shared/pkg/logs/exporter/exporter.go +++ /dev/null @@ -1,97 +0,0 @@ -package exporter - -import ( - "bytes" - "context" - "fmt" - "log" - "net/http" - "os" - "sync" - "time" -) - -var debugLogs = os.Getenv("DEBUG_LOGS") == "true" - -type HTTPExporter struct { - sync.Mutex - ctx context.Context - client http.Client - logQueue chan []byte - debug bool - address string -} - -func NewHTTPLogsExporter(ctx context.Context, address string) *HTTPExporter { - exporter := &HTTPExporter{ - client: http.Client{ - Timeout: 2 * time.Second, - }, - logQueue: make(chan []byte, 1024*10), - debug: debugLogs, - ctx: ctx, - address: address, - } - - if address == "" { - fmt.Println("no address provided for logs exporter, logs will not be sent") - } - - if debugLogs { - fmt.Println("debug logs enabled") - } - - go exporter.start() - - return exporter -} - -func (w *HTTPExporter) sendInstanceLogs(logs []byte) error { - if w.address == "" { - return nil - } - - request, err := http.NewRequestWithContext(w.ctx, http.MethodPost, w.address, bytes.NewBuffer(logs)) - if err != nil { - return err - } - - request.Header.Set("Content-Type", "application/json") - - response, err := w.client.Do(request) - if err != nil { - return err - } - - defer response.Body.Close() - - return nil -} - -func (w *HTTPExporter) start() { - for logLine := range w.logQueue { - if w.debug { - fmt.Print(string(logLine)) - } - - err := w.sendInstanceLogs(logLine) - if err != nil { - log.Printf(fmt.Sprintf("error sending instance logs: %+v\n", err)) - } - } -} - -func (w *HTTPExporter) Write(log []byte) (int, error) { - logsCopy := make([]byte, len(log)) - copy(logsCopy, log) - - select { - case w.logQueue <- logsCopy: - default: - if w.debug { - fmt.Println("logs queue is full, dropping logs") - } - } - - return len(log), nil -} diff --git 
a/packages/shared/pkg/logs/logger.go b/packages/shared/pkg/logs/logger.go deleted file mode 100644 index 2134430..0000000 --- a/packages/shared/pkg/logs/logger.go +++ /dev/null @@ -1,243 +0,0 @@ -package logs - -import ( - "context" - "io" - "math" - "os" - "sync" - "sync/atomic" - "time" - - "github.com/rs/zerolog" - - "github.com/e2b-dev/infra/packages/shared/pkg/logs/exporter" -) - -const ( - OrchestratorServiceName = "orchestrator" - cpuUsageThreshold = 0.85 - memoryUsageThreshold = 0.85 -) - -type sandboxLogExporter struct { - logger *zerolog.Logger -} - -var CollectorAddress = os.Getenv("LOGS_COLLECTOR_ADDRESS") -var CollectorPublicIP = os.Getenv("LOGS_COLLECTOR_PUBLIC_IP") - -func newSandboxLogExporter(serviceName string) *sandboxLogExporter { - zerolog.TimestampFieldName = "timestamp" - zerolog.TimeFieldFormat = time.RFC3339Nano - - ctx := context.Background() - exporters := []io.Writer{exporter.NewHTTPLogsExporter(ctx, CollectorAddress)} - - l := zerolog. - New(io.MultiWriter(exporters...)). - With(). - Timestamp(). - Logger(). - Level(zerolog.DebugLevel). 
- With().Str("logger", serviceName).Logger() - - return &sandboxLogExporter{ - logger: &l, - } -} - -var ( - logsExporter *sandboxLogExporter - logsExporterMU = sync.Mutex{} -) - -func getSandboxLogExporter() *sandboxLogExporter { - logsExporterMU.Lock() - defer logsExporterMU.Unlock() - - if logsExporter == nil { - logsExporter = newSandboxLogExporter(OrchestratorServiceName) - } - - return logsExporter -} - -type SandboxLogger struct { - exporter *sandboxLogExporter - internal bool - instanceID string - envID string - teamID string - cpuMax int64 - cpuWasAboveTreshold atomic.Bool - memoryMiBMax int64 - memoryWasAbove atomic.Int32 - healthCheckWasFailing atomic.Bool -} - -func NewSandboxLogger( - instanceID string, - envID string, - teamID string, - cpuMax int64, - memoryMax int64, - internal bool, -) *SandboxLogger { - sbxLogExporter := getSandboxLogExporter() - return &SandboxLogger{ - exporter: sbxLogExporter, - instanceID: instanceID, - internal: internal, - envID: envID, - teamID: teamID, - cpuMax: cpuMax, - memoryMiBMax: memoryMax, - } -} - -func (l *SandboxLogger) sendEvent(logger *zerolog.Event, format string, v ...interface{}) { - logger. - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Bool("internal", l.internal). // if this is true, it's sent to internal loki else to grafana cloud - Msgf(format, v...) -} - -func (l *SandboxLogger) GetInternalLogger() *SandboxLogger { - if l.internal { - return l - } - - return NewSandboxLogger(l.instanceID, l.envID, l.teamID, l.cpuMax, l.memoryMiBMax, true) -} - -func (l *SandboxLogger) Errorf( - format string, - v ...interface{}, -) { - l.sendEvent(l.exporter.logger.Error(), format, v...) -} - -func (l *SandboxLogger) Warnf( - format string, - v ...interface{}, -) { - l.sendEvent(l.exporter.logger.Warn(), format, v...) -} - -func (l *SandboxLogger) Infof( - format string, - v ...interface{}, -) { - l.sendEvent(l.exporter.logger.Info(), format, v...) 
-} - -func (l *SandboxLogger) Debugf( - format string, - v ...interface{}, -) { - l.sendEvent(l.exporter.logger.Debug(), format, v...) -} - -func (l *SandboxLogger) CPUUsage(cpu float64) { - // Round to 3 decimal places and cap at cpuMax - cpu = math.Min(float64(int(cpu*1000))/1000, float64(l.cpuMax)) - if cpu > cpuUsageThreshold*float64(l.cpuMax) { - l.cpuWasAboveTreshold.Store(true) - - l.exporter.logger.Warn(). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Float64("cpuUsage", cpu). - Int64("cpuCount", l.cpuMax). - Msgf("Sandbox is using %d %% of total CPU", int(cpu/float64(l.cpuMax)*100)) - } else if l.cpuWasAboveTreshold.Load() && cpu <= cpuUsageThreshold*float64(l.cpuMax) { - l.cpuWasAboveTreshold.Store(false) - l.exporter.logger.Warn(). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Float64("cpuUsage", cpu). - Int64("cpuCount", l.cpuMax). - Msgf("Sandbox usage fell below %d %% of total cpu", int(cpuUsageThreshold*100)) - } -} - -func (l *SandboxLogger) MemoryUsage(memoryMiB float64) { - // Cap at memoryMBMax - memoryMiB = math.Min(memoryMiB, float64(l.memoryMiBMax)) - if memoryMiB > memoryUsageThreshold*float64(l.memoryMiBMax) && int32(memoryMiB) > l.memoryWasAbove.Load() { - l.memoryWasAbove.Store(int32(memoryMiB)) - l.exporter.logger.Warn(). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Float64("memoryMiBUsed", memoryMiB). - Int64("memoryMiBTotal", l.memoryMiBMax). - Msgf("Sandbox memory used %d %% of RAM", int(memoryMiB/float64(l.memoryMiBMax)*100)) - return - } -} - -func (l *SandboxLogger) Metrics(memTotalMiB, memUsedMiB uint64, cpuCount uint32, cpuUsedPct float32) { - l.exporter.logger.Info(). - Str("category", "metrics"). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Float32("cpuUsedPct", cpuUsedPct). - Uint32("cpuCount", cpuCount). - Uint64("memTotalMiB", memTotalMiB). 
- Uint64("memUsedMiB", memUsedMiB). - Msg("Metrics") - - return -} - -func (l *SandboxLogger) Healthcheck(ok bool, alwaysReport bool) { - if !ok && !l.healthCheckWasFailing.Load() { - l.healthCheckWasFailing.Store(true) - - l.exporter.logger.Error(). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Bool("healthcheck", ok). - Msg("Sandbox healthcheck started failing") - return - } - if ok && l.healthCheckWasFailing.Load() { - l.healthCheckWasFailing.Store(false) - - l.exporter.logger.Warn(). - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Bool("healthcheck", ok). - Msg("Sandbox healthcheck recovered") - - return - } - - if alwaysReport { - var msg string - var logEvent *zerolog.Event - if ok { - msg = "Control sandbox healthcheck was successful" - logEvent = l.exporter.logger.Info() - } else { - msg = "Control sandbox healthcheck failed" - logEvent = l.exporter.logger.Error() - } - - logEvent. - Str("instanceID", l.instanceID). - Str("envID", l.envID). - Str("teamID", l.teamID). - Bool("healthcheck", ok). 
- Msg(msg) - } -} diff --git a/packages/shared/pkg/meters/main.go b/packages/shared/pkg/meters/main.go deleted file mode 100644 index fe40761..0000000 --- a/packages/shared/pkg/meters/main.go +++ /dev/null @@ -1,89 +0,0 @@ -package meters - -import ( - "sync" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" -) - -type CounterType string - -const ( - SandboxCreateMeterName CounterType = "api.env.instance.started" -) - -type UpDownCounterType string - -const ( - SandboxCountMeterName UpDownCounterType = "api.env.instance.running" - BuildCounterMeterName = "api.env.build.running" - NewNetworkSlotSPoolCounterMeterName = "orchestrator.network.slots_pool.new" - ReusedNetworkSlotSPoolCounterMeterName = "orchestrator.network.slots_pool.reused" - NBDkSlotSReadyPoolCounterMeterName = "orchestrator.nbd.slots_pool.read" -) - -var meter = otel.GetMeterProvider().Meter("nomad") -var meterLock = sync.Mutex{} -var counters = make(map[CounterType]metric.Int64Counter) -var upDownCounters = make(map[UpDownCounterType]metric.Int64UpDownCounter) - -var counterDesc = map[CounterType]string{ - SandboxCreateMeterName: "Number of currently waiting requests to create a new sandbox", -} - -var counterUnits = map[CounterType]string{ - SandboxCreateMeterName: "{sandbox}", -} - -var upDownCounterDesc = map[UpDownCounterType]string{ - SandboxCountMeterName: "Counter of started instances.", - BuildCounterMeterName: "Counter of running builds.", - ReusedNetworkSlotSPoolCounterMeterName: "Number of reused network slots ready to be used.", - NewNetworkSlotSPoolCounterMeterName: "Number of new network slots ready to be used.", - NBDkSlotSReadyPoolCounterMeterName: "Number of nbd slots ready to be used.", -} - -var upDownCounterUnits = map[UpDownCounterType]string{ - SandboxCountMeterName: "{sandbox}", - BuildCounterMeterName: "{build}", - ReusedNetworkSlotSPoolCounterMeterName: "{slot}", - NewNetworkSlotSPoolCounterMeterName: "{slot}", - NBDkSlotSReadyPoolCounterMeterName: "{slot}", 
-} - -func GetCounter(name CounterType) (metric.Int64Counter, error) { - meterLock.Lock() - defer meterLock.Unlock() - - if counter, ok := counters[name]; ok { - return counter, nil - } - - counter, err := meter.Int64Counter(string(name), metric.WithDescription(counterDesc[name]), metric.WithUnit(counterUnits[name])) - if err != nil { - return nil, err - } - - counters[name] = counter - - return counter, nil -} - -func GetUpDownCounter(name UpDownCounterType) (metric.Int64UpDownCounter, error) { - meterLock.Lock() - defer meterLock.Unlock() - - if counter, ok := upDownCounters[name]; ok { - return counter, nil - } - - counter, err := meter.Int64UpDownCounter(string(name), metric.WithDescription(upDownCounterDesc[name]), metric.WithUnit(upDownCounterUnits[name])) - if err != nil { - return nil, err - } - - upDownCounters[name] = counter - - return counter, nil -} diff --git a/packages/shared/pkg/models/accesstoken.go b/packages/shared/pkg/models/accesstoken.go index bc83e4c..8f9f7d0 100644 --- a/packages/shared/pkg/models/accesstoken.go +++ b/packages/shared/pkg/models/accesstoken.go @@ -18,7 +18,21 @@ import ( type AccessToken struct { config `json:"-"` // ID of the ent. - ID string `json:"id,omitempty"` + ID uuid.UUID `json:"id,omitempty"` + // AccessToken holds the value of the "access_token" field. + AccessToken string `json:"-"` + // AccessTokenHash holds the value of the "access_token_hash" field. + AccessTokenHash string `json:"-"` + // AccessTokenPrefix holds the value of the "access_token_prefix" field. + AccessTokenPrefix string `json:"access_token_prefix,omitempty"` + // AccessTokenLength holds the value of the "access_token_length" field. + AccessTokenLength int `json:"access_token_length,omitempty"` + // AccessTokenMaskPrefix holds the value of the "access_token_mask_prefix" field. + AccessTokenMaskPrefix string `json:"access_token_mask_prefix,omitempty"` + // AccessTokenMaskSuffix holds the value of the "access_token_mask_suffix" field. 
+ AccessTokenMaskSuffix string `json:"access_token_mask_suffix,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` // UserID holds the value of the "user_id" field. UserID uuid.UUID `json:"user_id,omitempty"` // CreatedAt holds the value of the "created_at" field. @@ -56,11 +70,13 @@ func (*AccessToken) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case accesstoken.FieldID: + case accesstoken.FieldAccessTokenLength: + values[i] = new(sql.NullInt64) + case accesstoken.FieldAccessToken, accesstoken.FieldAccessTokenHash, accesstoken.FieldAccessTokenPrefix, accesstoken.FieldAccessTokenMaskPrefix, accesstoken.FieldAccessTokenMaskSuffix, accesstoken.FieldName: values[i] = new(sql.NullString) case accesstoken.FieldCreatedAt: values[i] = new(sql.NullTime) - case accesstoken.FieldUserID: + case accesstoken.FieldID, accesstoken.FieldUserID: values[i] = new(uuid.UUID) default: values[i] = new(sql.UnknownType) @@ -78,10 +94,52 @@ func (at *AccessToken) assignValues(columns []string, values []any) error { for i := range columns { switch columns[i] { case accesstoken.FieldID: - if value, ok := values[i].(*sql.NullString); !ok { + if value, ok := values[i].(*uuid.UUID); !ok { return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + at.ID = *value + } + case accesstoken.FieldAccessToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field access_token", values[i]) + } else if value.Valid { + at.AccessToken = value.String + } + case accesstoken.FieldAccessTokenHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field access_token_hash", values[i]) + } else if value.Valid { + at.AccessTokenHash = value.String + } + case accesstoken.FieldAccessTokenPrefix: + if value, ok := values[i].(*sql.NullString); !ok { + return 
fmt.Errorf("unexpected type %T for field access_token_prefix", values[i]) + } else if value.Valid { + at.AccessTokenPrefix = value.String + } + case accesstoken.FieldAccessTokenLength: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field access_token_length", values[i]) } else if value.Valid { - at.ID = value.String + at.AccessTokenLength = int(value.Int64) + } + case accesstoken.FieldAccessTokenMaskPrefix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field access_token_mask_prefix", values[i]) + } else if value.Valid { + at.AccessTokenMaskPrefix = value.String + } + case accesstoken.FieldAccessTokenMaskSuffix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field access_token_mask_suffix", values[i]) + } else if value.Valid { + at.AccessTokenMaskSuffix = value.String + } + case accesstoken.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + at.Name = value.String } case accesstoken.FieldUserID: if value, ok := values[i].(*uuid.UUID); !ok { @@ -136,6 +194,25 @@ func (at *AccessToken) String() string { var builder strings.Builder builder.WriteString("AccessToken(") builder.WriteString(fmt.Sprintf("id=%v, ", at.ID)) + builder.WriteString("access_token=") + builder.WriteString(", ") + builder.WriteString("access_token_hash=") + builder.WriteString(", ") + builder.WriteString("access_token_prefix=") + builder.WriteString(at.AccessTokenPrefix) + builder.WriteString(", ") + builder.WriteString("access_token_length=") + builder.WriteString(fmt.Sprintf("%v", at.AccessTokenLength)) + builder.WriteString(", ") + builder.WriteString("access_token_mask_prefix=") + builder.WriteString(at.AccessTokenMaskPrefix) + builder.WriteString(", ") + builder.WriteString("access_token_mask_suffix=") + 
builder.WriteString(at.AccessTokenMaskSuffix) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(at.Name) + builder.WriteString(", ") builder.WriteString("user_id=") builder.WriteString(fmt.Sprintf("%v", at.UserID)) builder.WriteString(", ") diff --git a/packages/shared/pkg/models/accesstoken/accesstoken.go b/packages/shared/pkg/models/accesstoken/accesstoken.go index 8c00ac6..d096fe0 100644 --- a/packages/shared/pkg/models/accesstoken/accesstoken.go +++ b/packages/shared/pkg/models/accesstoken/accesstoken.go @@ -11,15 +11,27 @@ const ( // Label holds the string label denoting the accesstoken type in the database. Label = "access_token" // FieldID holds the string denoting the id field in the database. - FieldID = "access_token" + FieldID = "id" + // FieldAccessToken holds the string denoting the access_token field in the database. + FieldAccessToken = "access_token" + // FieldAccessTokenHash holds the string denoting the access_token_hash field in the database. + FieldAccessTokenHash = "access_token_hash" + // FieldAccessTokenPrefix holds the string denoting the access_token_prefix field in the database. + FieldAccessTokenPrefix = "access_token_prefix" + // FieldAccessTokenLength holds the string denoting the access_token_length field in the database. + FieldAccessTokenLength = "access_token_length" + // FieldAccessTokenMaskPrefix holds the string denoting the access_token_mask_prefix field in the database. + FieldAccessTokenMaskPrefix = "access_token_mask_prefix" + // FieldAccessTokenMaskSuffix holds the string denoting the access_token_mask_suffix field in the database. + FieldAccessTokenMaskSuffix = "access_token_mask_suffix" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" // FieldUserID holds the string denoting the user_id field in the database. FieldUserID = "user_id" // FieldCreatedAt holds the string denoting the created_at field in the database. 
FieldCreatedAt = "created_at" // EdgeUser holds the string denoting the user edge name in mutations. EdgeUser = "user" - // UserFieldID holds the string denoting the ID field of the User. - UserFieldID = "id" // Table holds the table name of the accesstoken in the database. Table = "access_tokens" // UserTable is the table that holds the user relation/edge. @@ -34,6 +46,13 @@ const ( // Columns holds all SQL columns for accesstoken fields. var Columns = []string{ FieldID, + FieldAccessToken, + FieldAccessTokenHash, + FieldAccessTokenPrefix, + FieldAccessTokenLength, + FieldAccessTokenMaskPrefix, + FieldAccessTokenMaskSuffix, + FieldName, FieldUserID, FieldCreatedAt, } @@ -48,6 +67,11 @@ func ValidColumn(column string) bool { return false } +var ( + // DefaultName holds the default value on creation for the "name" field. + DefaultName string +) + // OrderOption defines the ordering options for the AccessToken queries. type OrderOption func(*sql.Selector) @@ -56,6 +80,41 @@ func ByID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldID, opts...).ToFunc() } +// ByAccessToken orders the results by the access_token field. +func ByAccessToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessToken, opts...).ToFunc() +} + +// ByAccessTokenHash orders the results by the access_token_hash field. +func ByAccessTokenHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessTokenHash, opts...).ToFunc() +} + +// ByAccessTokenPrefix orders the results by the access_token_prefix field. +func ByAccessTokenPrefix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessTokenPrefix, opts...).ToFunc() +} + +// ByAccessTokenLength orders the results by the access_token_length field. 
+func ByAccessTokenLength(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessTokenLength, opts...).ToFunc() +} + +// ByAccessTokenMaskPrefix orders the results by the access_token_mask_prefix field. +func ByAccessTokenMaskPrefix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessTokenMaskPrefix, opts...).ToFunc() +} + +// ByAccessTokenMaskSuffix orders the results by the access_token_mask_suffix field. +func ByAccessTokenMaskSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessTokenMaskSuffix, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + // ByUserID orders the results by the user_id field. func ByUserID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldUserID, opts...).ToFunc() @@ -75,7 +134,7 @@ func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { func newUserStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(UserInverseTable, UserFieldID), + sqlgraph.To(UserInverseTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), ) } diff --git a/packages/shared/pkg/models/accesstoken/where.go b/packages/shared/pkg/models/accesstoken/where.go index 8952e52..8778853 100644 --- a/packages/shared/pkg/models/accesstoken/where.go +++ b/packages/shared/pkg/models/accesstoken/where.go @@ -13,58 +13,83 @@ import ( ) // ID filters vertices based on their ID field. -func ID(id string) predicate.AccessToken { +func ID(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. 
-func IDEQ(id string) predicate.AccessToken { +func IDEQ(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id string) predicate.AccessToken { +func IDNEQ(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. -func IDIn(ids ...string) predicate.AccessToken { +func IDIn(ids ...uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...string) predicate.AccessToken { +func IDNotIn(ids ...uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. -func IDGT(id string) predicate.AccessToken { +func IDGT(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. -func IDGTE(id string) predicate.AccessToken { +func IDGTE(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. -func IDLT(id string) predicate.AccessToken { +func IDLT(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. -func IDLTE(id string) predicate.AccessToken { +func IDLTE(id uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldLTE(FieldID, id)) } -// IDEqualFold applies the EqualFold predicate on the ID field. -func IDEqualFold(id string) predicate.AccessToken { - return predicate.AccessToken(sql.FieldEqualFold(FieldID, id)) +// AccessToken applies equality check predicate on the "access_token" field. It's identical to AccessTokenEQ. 
+func AccessToken(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessToken, v)) } -// IDContainsFold applies the ContainsFold predicate on the ID field. -func IDContainsFold(id string) predicate.AccessToken { - return predicate.AccessToken(sql.FieldContainsFold(FieldID, id)) +// AccessTokenHash applies equality check predicate on the "access_token_hash" field. It's identical to AccessTokenHashEQ. +func AccessTokenHash(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenHash, v)) +} + +// AccessTokenPrefix applies equality check predicate on the "access_token_prefix" field. It's identical to AccessTokenPrefixEQ. +func AccessTokenPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenPrefix, v)) +} + +// AccessTokenLength applies equality check predicate on the "access_token_length" field. It's identical to AccessTokenLengthEQ. +func AccessTokenLength(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenLength, v)) +} + +// AccessTokenMaskPrefix applies equality check predicate on the "access_token_mask_prefix" field. It's identical to AccessTokenMaskPrefixEQ. +func AccessTokenMaskPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskSuffix applies equality check predicate on the "access_token_mask_suffix" field. It's identical to AccessTokenMaskSuffixEQ. +func AccessTokenMaskSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenMaskSuffix, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldName, v)) } // UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. 
@@ -77,6 +102,436 @@ func CreatedAt(v time.Time) predicate.AccessToken { return predicate.AccessToken(sql.FieldEQ(FieldCreatedAt, v)) } +// AccessTokenEQ applies the EQ predicate on the "access_token" field. +func AccessTokenEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessToken, v)) +} + +// AccessTokenNEQ applies the NEQ predicate on the "access_token" field. +func AccessTokenNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessToken, v)) +} + +// AccessTokenIn applies the In predicate on the "access_token" field. +func AccessTokenIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessToken, vs...)) +} + +// AccessTokenNotIn applies the NotIn predicate on the "access_token" field. +func AccessTokenNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessToken, vs...)) +} + +// AccessTokenGT applies the GT predicate on the "access_token" field. +func AccessTokenGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessToken, v)) +} + +// AccessTokenGTE applies the GTE predicate on the "access_token" field. +func AccessTokenGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessToken, v)) +} + +// AccessTokenLT applies the LT predicate on the "access_token" field. +func AccessTokenLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessToken, v)) +} + +// AccessTokenLTE applies the LTE predicate on the "access_token" field. +func AccessTokenLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessToken, v)) +} + +// AccessTokenContains applies the Contains predicate on the "access_token" field. 
+func AccessTokenContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldAccessToken, v)) +} + +// AccessTokenHasPrefix applies the HasPrefix predicate on the "access_token" field. +func AccessTokenHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldAccessToken, v)) +} + +// AccessTokenHasSuffix applies the HasSuffix predicate on the "access_token" field. +func AccessTokenHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldAccessToken, v)) +} + +// AccessTokenEqualFold applies the EqualFold predicate on the "access_token" field. +func AccessTokenEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldAccessToken, v)) +} + +// AccessTokenContainsFold applies the ContainsFold predicate on the "access_token" field. +func AccessTokenContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldAccessToken, v)) +} + +// AccessTokenHashEQ applies the EQ predicate on the "access_token_hash" field. +func AccessTokenHashEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenHash, v)) +} + +// AccessTokenHashNEQ applies the NEQ predicate on the "access_token_hash" field. +func AccessTokenHashNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessTokenHash, v)) +} + +// AccessTokenHashIn applies the In predicate on the "access_token_hash" field. +func AccessTokenHashIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessTokenHash, vs...)) +} + +// AccessTokenHashNotIn applies the NotIn predicate on the "access_token_hash" field. 
+func AccessTokenHashNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessTokenHash, vs...)) +} + +// AccessTokenHashGT applies the GT predicate on the "access_token_hash" field. +func AccessTokenHashGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessTokenHash, v)) +} + +// AccessTokenHashGTE applies the GTE predicate on the "access_token_hash" field. +func AccessTokenHashGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessTokenHash, v)) +} + +// AccessTokenHashLT applies the LT predicate on the "access_token_hash" field. +func AccessTokenHashLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessTokenHash, v)) +} + +// AccessTokenHashLTE applies the LTE predicate on the "access_token_hash" field. +func AccessTokenHashLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessTokenHash, v)) +} + +// AccessTokenHashContains applies the Contains predicate on the "access_token_hash" field. +func AccessTokenHashContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldAccessTokenHash, v)) +} + +// AccessTokenHashHasPrefix applies the HasPrefix predicate on the "access_token_hash" field. +func AccessTokenHashHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldAccessTokenHash, v)) +} + +// AccessTokenHashHasSuffix applies the HasSuffix predicate on the "access_token_hash" field. +func AccessTokenHashHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldAccessTokenHash, v)) +} + +// AccessTokenHashEqualFold applies the EqualFold predicate on the "access_token_hash" field. 
+func AccessTokenHashEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldAccessTokenHash, v)) +} + +// AccessTokenHashContainsFold applies the ContainsFold predicate on the "access_token_hash" field. +func AccessTokenHashContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldAccessTokenHash, v)) +} + +// AccessTokenPrefixEQ applies the EQ predicate on the "access_token_prefix" field. +func AccessTokenPrefixEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixNEQ applies the NEQ predicate on the "access_token_prefix" field. +func AccessTokenPrefixNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixIn applies the In predicate on the "access_token_prefix" field. +func AccessTokenPrefixIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessTokenPrefix, vs...)) +} + +// AccessTokenPrefixNotIn applies the NotIn predicate on the "access_token_prefix" field. +func AccessTokenPrefixNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessTokenPrefix, vs...)) +} + +// AccessTokenPrefixGT applies the GT predicate on the "access_token_prefix" field. +func AccessTokenPrefixGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixGTE applies the GTE predicate on the "access_token_prefix" field. +func AccessTokenPrefixGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixLT applies the LT predicate on the "access_token_prefix" field. 
+func AccessTokenPrefixLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixLTE applies the LTE predicate on the "access_token_prefix" field. +func AccessTokenPrefixLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixContains applies the Contains predicate on the "access_token_prefix" field. +func AccessTokenPrefixContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixHasPrefix applies the HasPrefix predicate on the "access_token_prefix" field. +func AccessTokenPrefixHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixHasSuffix applies the HasSuffix predicate on the "access_token_prefix" field. +func AccessTokenPrefixHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixEqualFold applies the EqualFold predicate on the "access_token_prefix" field. +func AccessTokenPrefixEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldAccessTokenPrefix, v)) +} + +// AccessTokenPrefixContainsFold applies the ContainsFold predicate on the "access_token_prefix" field. +func AccessTokenPrefixContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldAccessTokenPrefix, v)) +} + +// AccessTokenLengthEQ applies the EQ predicate on the "access_token_length" field. +func AccessTokenLengthEQ(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenLength, v)) +} + +// AccessTokenLengthNEQ applies the NEQ predicate on the "access_token_length" field. 
+func AccessTokenLengthNEQ(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessTokenLength, v)) +} + +// AccessTokenLengthIn applies the In predicate on the "access_token_length" field. +func AccessTokenLengthIn(vs ...int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessTokenLength, vs...)) +} + +// AccessTokenLengthNotIn applies the NotIn predicate on the "access_token_length" field. +func AccessTokenLengthNotIn(vs ...int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessTokenLength, vs...)) +} + +// AccessTokenLengthGT applies the GT predicate on the "access_token_length" field. +func AccessTokenLengthGT(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessTokenLength, v)) +} + +// AccessTokenLengthGTE applies the GTE predicate on the "access_token_length" field. +func AccessTokenLengthGTE(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessTokenLength, v)) +} + +// AccessTokenLengthLT applies the LT predicate on the "access_token_length" field. +func AccessTokenLengthLT(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessTokenLength, v)) +} + +// AccessTokenLengthLTE applies the LTE predicate on the "access_token_length" field. +func AccessTokenLengthLTE(v int) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessTokenLength, v)) +} + +// AccessTokenMaskPrefixEQ applies the EQ predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixNEQ applies the NEQ predicate on the "access_token_mask_prefix" field. 
+func AccessTokenMaskPrefixNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixIn applies the In predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessTokenMaskPrefix, vs...)) +} + +// AccessTokenMaskPrefixNotIn applies the NotIn predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessTokenMaskPrefix, vs...)) +} + +// AccessTokenMaskPrefixGT applies the GT predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixGTE applies the GTE predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixLT applies the LT predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixLTE applies the LTE predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixContains applies the Contains predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixHasPrefix applies the HasPrefix predicate on the "access_token_mask_prefix" field. 
+func AccessTokenMaskPrefixHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixHasSuffix applies the HasSuffix predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixEqualFold applies the EqualFold predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskPrefixContainsFold applies the ContainsFold predicate on the "access_token_mask_prefix" field. +func AccessTokenMaskPrefixContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldAccessTokenMaskPrefix, v)) +} + +// AccessTokenMaskSuffixEQ applies the EQ predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixNEQ applies the NEQ predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixIn applies the In predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldAccessTokenMaskSuffix, vs...)) +} + +// AccessTokenMaskSuffixNotIn applies the NotIn predicate on the "access_token_mask_suffix" field. 
+func AccessTokenMaskSuffixNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldAccessTokenMaskSuffix, vs...)) +} + +// AccessTokenMaskSuffixGT applies the GT predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixGTE applies the GTE predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixLT applies the LT predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixLTE applies the LTE predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixContains applies the Contains predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixHasPrefix applies the HasPrefix predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixHasSuffix applies the HasSuffix predicate on the "access_token_mask_suffix" field. 
+func AccessTokenMaskSuffixHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixEqualFold applies the EqualFold predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldAccessTokenMaskSuffix, v)) +} + +// AccessTokenMaskSuffixContainsFold applies the ContainsFold predicate on the "access_token_mask_suffix" field. +func AccessTokenMaskSuffixContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldAccessTokenMaskSuffix, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. 
+func NameLTE(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.AccessToken { + return predicate.AccessToken(sql.FieldContainsFold(FieldName, v)) +} + // UserIDEQ applies the EQ predicate on the "user_id" field. func UserIDEQ(v uuid.UUID) predicate.AccessToken { return predicate.AccessToken(sql.FieldEQ(FieldUserID, v)) diff --git a/packages/shared/pkg/models/accesstoken_create.go b/packages/shared/pkg/models/accesstoken_create.go index f6432e1..889c190 100644 --- a/packages/shared/pkg/models/accesstoken_create.go +++ b/packages/shared/pkg/models/accesstoken_create.go @@ -25,6 +25,56 @@ type AccessTokenCreate struct { conflict []sql.ConflictOption } +// SetAccessToken sets the "access_token" field. +func (atc *AccessTokenCreate) SetAccessToken(s string) *AccessTokenCreate { + atc.mutation.SetAccessToken(s) + return atc +} + +// SetAccessTokenHash sets the "access_token_hash" field. 
+func (atc *AccessTokenCreate) SetAccessTokenHash(s string) *AccessTokenCreate { + atc.mutation.SetAccessTokenHash(s) + return atc +} + +// SetAccessTokenPrefix sets the "access_token_prefix" field. +func (atc *AccessTokenCreate) SetAccessTokenPrefix(s string) *AccessTokenCreate { + atc.mutation.SetAccessTokenPrefix(s) + return atc +} + +// SetAccessTokenLength sets the "access_token_length" field. +func (atc *AccessTokenCreate) SetAccessTokenLength(i int) *AccessTokenCreate { + atc.mutation.SetAccessTokenLength(i) + return atc +} + +// SetAccessTokenMaskPrefix sets the "access_token_mask_prefix" field. +func (atc *AccessTokenCreate) SetAccessTokenMaskPrefix(s string) *AccessTokenCreate { + atc.mutation.SetAccessTokenMaskPrefix(s) + return atc +} + +// SetAccessTokenMaskSuffix sets the "access_token_mask_suffix" field. +func (atc *AccessTokenCreate) SetAccessTokenMaskSuffix(s string) *AccessTokenCreate { + atc.mutation.SetAccessTokenMaskSuffix(s) + return atc +} + +// SetName sets the "name" field. +func (atc *AccessTokenCreate) SetName(s string) *AccessTokenCreate { + atc.mutation.SetName(s) + return atc +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (atc *AccessTokenCreate) SetNillableName(s *string) *AccessTokenCreate { + if s != nil { + atc.SetName(*s) + } + return atc +} + // SetUserID sets the "user_id" field. func (atc *AccessTokenCreate) SetUserID(u uuid.UUID) *AccessTokenCreate { atc.mutation.SetUserID(u) @@ -46,8 +96,8 @@ func (atc *AccessTokenCreate) SetNillableCreatedAt(t *time.Time) *AccessTokenCre } // SetID sets the "id" field. -func (atc *AccessTokenCreate) SetID(s string) *AccessTokenCreate { - atc.mutation.SetID(s) +func (atc *AccessTokenCreate) SetID(u uuid.UUID) *AccessTokenCreate { + atc.mutation.SetID(u) return atc } @@ -63,6 +113,7 @@ func (atc *AccessTokenCreate) Mutation() *AccessTokenMutation { // Save creates the AccessToken in the database. 
func (atc *AccessTokenCreate) Save(ctx context.Context) (*AccessToken, error) { + atc.defaults() return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks) } @@ -88,8 +139,37 @@ func (atc *AccessTokenCreate) ExecX(ctx context.Context) { } } +// defaults sets the default values of the builder before save. +func (atc *AccessTokenCreate) defaults() { + if _, ok := atc.mutation.Name(); !ok { + v := accesstoken.DefaultName + atc.mutation.SetName(v) + } +} + // check runs all checks and user-defined validators on the builder. func (atc *AccessTokenCreate) check() error { + if _, ok := atc.mutation.AccessToken(); !ok { + return &ValidationError{Name: "access_token", err: errors.New(`models: missing required field "AccessToken.access_token"`)} + } + if _, ok := atc.mutation.AccessTokenHash(); !ok { + return &ValidationError{Name: "access_token_hash", err: errors.New(`models: missing required field "AccessToken.access_token_hash"`)} + } + if _, ok := atc.mutation.AccessTokenPrefix(); !ok { + return &ValidationError{Name: "access_token_prefix", err: errors.New(`models: missing required field "AccessToken.access_token_prefix"`)} + } + if _, ok := atc.mutation.AccessTokenLength(); !ok { + return &ValidationError{Name: "access_token_length", err: errors.New(`models: missing required field "AccessToken.access_token_length"`)} + } + if _, ok := atc.mutation.AccessTokenMaskPrefix(); !ok { + return &ValidationError{Name: "access_token_mask_prefix", err: errors.New(`models: missing required field "AccessToken.access_token_mask_prefix"`)} + } + if _, ok := atc.mutation.AccessTokenMaskSuffix(); !ok { + return &ValidationError{Name: "access_token_mask_suffix", err: errors.New(`models: missing required field "AccessToken.access_token_mask_suffix"`)} + } + if _, ok := atc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`models: missing required field "AccessToken.name"`)} + } if _, ok := atc.mutation.UserID(); !ok { return &ValidationError{Name: "user_id", 
err: errors.New(`models: missing required field "AccessToken.user_id"`)} } @@ -111,10 +191,10 @@ func (atc *AccessTokenCreate) sqlSave(ctx context.Context) (*AccessToken, error) return nil, err } if _spec.ID.Value != nil { - if id, ok := _spec.ID.Value.(string); ok { - _node.ID = id - } else { - return nil, fmt.Errorf("unexpected AccessToken.ID type: %T", _spec.ID.Value) + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err } } atc.mutation.id = &_node.ID @@ -125,13 +205,41 @@ func (atc *AccessTokenCreate) sqlSave(ctx context.Context) (*AccessToken, error) func (atc *AccessTokenCreate) createSpec() (*AccessToken, *sqlgraph.CreateSpec) { var ( _node = &AccessToken{config: atc.config} - _spec = sqlgraph.NewCreateSpec(accesstoken.Table, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString)) + _spec = sqlgraph.NewCreateSpec(accesstoken.Table, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID)) ) _spec.Schema = atc.schemaConfig.AccessToken _spec.OnConflict = atc.conflict if id, ok := atc.mutation.ID(); ok { _node.ID = id - _spec.ID.Value = id + _spec.ID.Value = &id + } + if value, ok := atc.mutation.AccessToken(); ok { + _spec.SetField(accesstoken.FieldAccessToken, field.TypeString, value) + _node.AccessToken = value + } + if value, ok := atc.mutation.AccessTokenHash(); ok { + _spec.SetField(accesstoken.FieldAccessTokenHash, field.TypeString, value) + _node.AccessTokenHash = value + } + if value, ok := atc.mutation.AccessTokenPrefix(); ok { + _spec.SetField(accesstoken.FieldAccessTokenPrefix, field.TypeString, value) + _node.AccessTokenPrefix = value + } + if value, ok := atc.mutation.AccessTokenLength(); ok { + _spec.SetField(accesstoken.FieldAccessTokenLength, field.TypeInt, value) + _node.AccessTokenLength = value + } + if value, ok := atc.mutation.AccessTokenMaskPrefix(); ok { + _spec.SetField(accesstoken.FieldAccessTokenMaskPrefix, field.TypeString, value) + 
_node.AccessTokenMaskPrefix = value + } + if value, ok := atc.mutation.AccessTokenMaskSuffix(); ok { + _spec.SetField(accesstoken.FieldAccessTokenMaskSuffix, field.TypeString, value) + _node.AccessTokenMaskSuffix = value + } + if value, ok := atc.mutation.Name(); ok { + _spec.SetField(accesstoken.FieldName, field.TypeString, value) + _node.Name = value } if value, ok := atc.mutation.CreatedAt(); ok { _spec.SetField(accesstoken.FieldCreatedAt, field.TypeTime, value) @@ -162,7 +270,7 @@ func (atc *AccessTokenCreate) createSpec() (*AccessToken, *sqlgraph.CreateSpec) // of the `INSERT` statement. For example: // // client.AccessToken.Create(). -// SetUserID(v). +// SetAccessToken(v). // OnConflict( // // Update the row with the new values // // the was proposed for insertion. @@ -171,7 +279,7 @@ func (atc *AccessTokenCreate) createSpec() (*AccessToken, *sqlgraph.CreateSpec) // // Override some of the fields with custom // // update values. // Update(func(u *ent.AccessTokenUpsert) { -// SetUserID(v+v). +// SetAccessToken(v+v). // }). // Exec(ctx) func (atc *AccessTokenCreate) OnConflict(opts ...sql.ConflictOption) *AccessTokenUpsertOne { @@ -207,6 +315,18 @@ type ( } ) +// SetName sets the "name" field. +func (u *AccessTokenUpsert) SetName(v string) *AccessTokenUpsert { + u.Set(accesstoken.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccessTokenUpsert) UpdateName() *AccessTokenUpsert { + u.SetExcluded(accesstoken.FieldName) + return u +} + // SetUserID sets the "user_id" field. 
func (u *AccessTokenUpsert) SetUserID(v uuid.UUID) *AccessTokenUpsert { u.Set(accesstoken.FieldUserID, v) @@ -236,6 +356,24 @@ func (u *AccessTokenUpsertOne) UpdateNewValues() *AccessTokenUpsertOne { if _, exists := u.create.mutation.ID(); exists { s.SetIgnore(accesstoken.FieldID) } + if _, exists := u.create.mutation.AccessToken(); exists { + s.SetIgnore(accesstoken.FieldAccessToken) + } + if _, exists := u.create.mutation.AccessTokenHash(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenHash) + } + if _, exists := u.create.mutation.AccessTokenPrefix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenPrefix) + } + if _, exists := u.create.mutation.AccessTokenLength(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenLength) + } + if _, exists := u.create.mutation.AccessTokenMaskPrefix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenMaskPrefix) + } + if _, exists := u.create.mutation.AccessTokenMaskSuffix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenMaskSuffix) + } if _, exists := u.create.mutation.CreatedAt(); exists { s.SetIgnore(accesstoken.FieldCreatedAt) } @@ -270,6 +408,20 @@ func (u *AccessTokenUpsertOne) Update(set func(*AccessTokenUpsert)) *AccessToken return u } +// SetName sets the "name" field. +func (u *AccessTokenUpsertOne) SetName(v string) *AccessTokenUpsertOne { + return u.Update(func(s *AccessTokenUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccessTokenUpsertOne) UpdateName() *AccessTokenUpsertOne { + return u.Update(func(s *AccessTokenUpsert) { + s.UpdateName() + }) +} + // SetUserID sets the "user_id" field. func (u *AccessTokenUpsertOne) SetUserID(v uuid.UUID) *AccessTokenUpsertOne { return u.Update(func(s *AccessTokenUpsert) { @@ -300,7 +452,7 @@ func (u *AccessTokenUpsertOne) ExecX(ctx context.Context) { } // Exec executes the UPSERT query and returns the inserted/updated ID. 
-func (u *AccessTokenUpsertOne) ID(ctx context.Context) (id string, err error) { +func (u *AccessTokenUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { if u.create.driver.Dialect() == dialect.MySQL { // In case of "ON CONFLICT", there is no way to get back non-numeric ID // fields from the database since MySQL does not support the RETURNING clause. @@ -314,7 +466,7 @@ func (u *AccessTokenUpsertOne) ID(ctx context.Context) (id string, err error) { } // IDX is like ID, but panics if an error occurs. -func (u *AccessTokenUpsertOne) IDX(ctx context.Context) string { +func (u *AccessTokenUpsertOne) IDX(ctx context.Context) uuid.UUID { id, err := u.ID(ctx) if err != nil { panic(err) @@ -341,6 +493,7 @@ func (atcb *AccessTokenCreateBulk) Save(ctx context.Context) ([]*AccessToken, er for i := range atcb.builders { func(i int, root context.Context) { builder := atcb.builders[i] + builder.defaults() var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { mutation, ok := m.(*AccessTokenMutation) if !ok { @@ -419,7 +572,7 @@ func (atcb *AccessTokenCreateBulk) ExecX(ctx context.Context) { // // Override some of the fields with custom // // update values. // Update(func(u *ent.AccessTokenUpsert) { -// SetUserID(v+v). +// SetAccessToken(v+v). // }). 
// Exec(ctx) func (atcb *AccessTokenCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccessTokenUpsertBulk { @@ -466,6 +619,24 @@ func (u *AccessTokenUpsertBulk) UpdateNewValues() *AccessTokenUpsertBulk { if _, exists := b.mutation.ID(); exists { s.SetIgnore(accesstoken.FieldID) } + if _, exists := b.mutation.AccessToken(); exists { + s.SetIgnore(accesstoken.FieldAccessToken) + } + if _, exists := b.mutation.AccessTokenHash(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenHash) + } + if _, exists := b.mutation.AccessTokenPrefix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenPrefix) + } + if _, exists := b.mutation.AccessTokenLength(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenLength) + } + if _, exists := b.mutation.AccessTokenMaskPrefix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenMaskPrefix) + } + if _, exists := b.mutation.AccessTokenMaskSuffix(); exists { + s.SetIgnore(accesstoken.FieldAccessTokenMaskSuffix) + } if _, exists := b.mutation.CreatedAt(); exists { s.SetIgnore(accesstoken.FieldCreatedAt) } @@ -501,6 +672,20 @@ func (u *AccessTokenUpsertBulk) Update(set func(*AccessTokenUpsert)) *AccessToke return u } +// SetName sets the "name" field. +func (u *AccessTokenUpsertBulk) SetName(v string) *AccessTokenUpsertBulk { + return u.Update(func(s *AccessTokenUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccessTokenUpsertBulk) UpdateName() *AccessTokenUpsertBulk { + return u.Update(func(s *AccessTokenUpsert) { + s.UpdateName() + }) +} + // SetUserID sets the "user_id" field. 
func (u *AccessTokenUpsertBulk) SetUserID(v uuid.UUID) *AccessTokenUpsertBulk { return u.Update(func(s *AccessTokenUpsert) { diff --git a/packages/shared/pkg/models/accesstoken_delete.go b/packages/shared/pkg/models/accesstoken_delete.go index 5bdbaf2..94d599f 100644 --- a/packages/shared/pkg/models/accesstoken_delete.go +++ b/packages/shared/pkg/models/accesstoken_delete.go @@ -41,7 +41,7 @@ func (atd *AccessTokenDelete) ExecX(ctx context.Context) int { } func (atd *AccessTokenDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(accesstoken.Table, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString)) + _spec := sqlgraph.NewDeleteSpec(accesstoken.Table, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID)) _spec.Node.Schema = atd.schemaConfig.AccessToken ctx = internal.NewSchemaConfigContext(ctx, atd.schemaConfig) if ps := atd.mutation.predicates; len(ps) > 0 { diff --git a/packages/shared/pkg/models/accesstoken_query.go b/packages/shared/pkg/models/accesstoken_query.go index 1a3fffa..11b9a00 100644 --- a/packages/shared/pkg/models/accesstoken_query.go +++ b/packages/shared/pkg/models/accesstoken_query.go @@ -111,8 +111,8 @@ func (atq *AccessTokenQuery) FirstX(ctx context.Context) *AccessToken { // FirstID returns the first AccessToken ID from the query. // Returns a *NotFoundError when no AccessToken ID was found. -func (atq *AccessTokenQuery) FirstID(ctx context.Context) (id string, err error) { - var ids []string +func (atq *AccessTokenQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, "FirstID")); err != nil { return } @@ -124,7 +124,7 @@ func (atq *AccessTokenQuery) FirstID(ctx context.Context) (id string, err error) } // FirstIDX is like FirstID, but panics if an error occurs. 
-func (atq *AccessTokenQuery) FirstIDX(ctx context.Context) string { +func (atq *AccessTokenQuery) FirstIDX(ctx context.Context) uuid.UUID { id, err := atq.FirstID(ctx) if err != nil && !IsNotFound(err) { panic(err) @@ -162,8 +162,8 @@ func (atq *AccessTokenQuery) OnlyX(ctx context.Context) *AccessToken { // OnlyID is like Only, but returns the only AccessToken ID in the query. // Returns a *NotSingularError when more than one AccessToken ID is found. // Returns a *NotFoundError when no entities are found. -func (atq *AccessTokenQuery) OnlyID(ctx context.Context) (id string, err error) { - var ids []string +func (atq *AccessTokenQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, "OnlyID")); err != nil { return } @@ -179,7 +179,7 @@ func (atq *AccessTokenQuery) OnlyID(ctx context.Context) (id string, err error) } // OnlyIDX is like OnlyID, but panics if an error occurs. -func (atq *AccessTokenQuery) OnlyIDX(ctx context.Context) string { +func (atq *AccessTokenQuery) OnlyIDX(ctx context.Context) uuid.UUID { id, err := atq.OnlyID(ctx) if err != nil { panic(err) @@ -207,7 +207,7 @@ func (atq *AccessTokenQuery) AllX(ctx context.Context) []*AccessToken { } // IDs executes the query and returns a list of AccessToken IDs. -func (atq *AccessTokenQuery) IDs(ctx context.Context) (ids []string, err error) { +func (atq *AccessTokenQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { if atq.ctx.Unique == nil && atq.path != nil { atq.Unique(true) } @@ -219,7 +219,7 @@ func (atq *AccessTokenQuery) IDs(ctx context.Context) (ids []string, err error) } // IDsX is like IDs, but panics if an error occurs. 
-func (atq *AccessTokenQuery) IDsX(ctx context.Context) []string { +func (atq *AccessTokenQuery) IDsX(ctx context.Context) []uuid.UUID { ids, err := atq.IDs(ctx) if err != nil { panic(err) @@ -303,12 +303,12 @@ func (atq *AccessTokenQuery) WithUser(opts ...func(*UserQuery)) *AccessTokenQuer // Example: // // var v []struct { -// UserID uuid.UUID `json:"user_id,omitempty"` +// AccessToken string `json:"access_token,omitempty"` // Count int `json:"count,omitempty"` // } // // client.AccessToken.Query(). -// GroupBy(accesstoken.FieldUserID). +// GroupBy(accesstoken.FieldAccessToken). // Aggregate(models.Count()). // Scan(ctx, &v) func (atq *AccessTokenQuery) GroupBy(field string, fields ...string) *AccessTokenGroupBy { @@ -326,11 +326,11 @@ func (atq *AccessTokenQuery) GroupBy(field string, fields ...string) *AccessToke // Example: // // var v []struct { -// UserID uuid.UUID `json:"user_id,omitempty"` +// AccessToken string `json:"access_token,omitempty"` // } // // client.AccessToken.Query(). -// Select(accesstoken.FieldUserID). +// Select(accesstoken.FieldAccessToken). // Scan(ctx, &v) func (atq *AccessTokenQuery) Select(fields ...string) *AccessTokenSelect { atq.ctx.Fields = append(atq.ctx.Fields, fields...) 
@@ -456,7 +456,7 @@ func (atq *AccessTokenQuery) sqlCount(ctx context.Context) (int, error) { } func (atq *AccessTokenQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString)) + _spec := sqlgraph.NewQuerySpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID)) _spec.From = atq.sql if unique := atq.ctx.Unique; unique != nil { _spec.Unique = *unique diff --git a/packages/shared/pkg/models/accesstoken_update.go b/packages/shared/pkg/models/accesstoken_update.go index 9aa97af..f5d9f75 100644 --- a/packages/shared/pkg/models/accesstoken_update.go +++ b/packages/shared/pkg/models/accesstoken_update.go @@ -31,6 +31,20 @@ func (atu *AccessTokenUpdate) Where(ps ...predicate.AccessToken) *AccessTokenUpd return atu } +// SetName sets the "name" field. +func (atu *AccessTokenUpdate) SetName(s string) *AccessTokenUpdate { + atu.mutation.SetName(s) + return atu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (atu *AccessTokenUpdate) SetNillableName(s *string) *AccessTokenUpdate { + if s != nil { + atu.SetName(*s) + } + return atu +} + // SetUserID sets the "user_id" field. 
func (atu *AccessTokenUpdate) SetUserID(u uuid.UUID) *AccessTokenUpdate { atu.mutation.SetUserID(u) @@ -106,7 +120,7 @@ func (atu *AccessTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { if err := atu.check(); err != nil { return n, err } - _spec := sqlgraph.NewUpdateSpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString)) + _spec := sqlgraph.NewUpdateSpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID)) if ps := atu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -114,6 +128,9 @@ func (atu *AccessTokenUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } + if value, ok := atu.mutation.Name(); ok { + _spec.SetField(accesstoken.FieldName, field.TypeString, value) + } if atu.mutation.CreatedAtCleared() { _spec.ClearField(accesstoken.FieldCreatedAt, field.TypeTime) } @@ -172,6 +189,20 @@ type AccessTokenUpdateOne struct { modifiers []func(*sql.UpdateBuilder) } +// SetName sets the "name" field. +func (atuo *AccessTokenUpdateOne) SetName(s string) *AccessTokenUpdateOne { + atuo.mutation.SetName(s) + return atuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (atuo *AccessTokenUpdateOne) SetNillableName(s *string) *AccessTokenUpdateOne { + if s != nil { + atuo.SetName(*s) + } + return atuo +} + // SetUserID sets the "user_id" field. 
func (atuo *AccessTokenUpdateOne) SetUserID(u uuid.UUID) *AccessTokenUpdateOne { atuo.mutation.SetUserID(u) @@ -260,7 +291,7 @@ func (atuo *AccessTokenUpdateOne) sqlSave(ctx context.Context) (_node *AccessTok if err := atuo.check(); err != nil { return _node, err } - _spec := sqlgraph.NewUpdateSpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString)) + _spec := sqlgraph.NewUpdateSpec(accesstoken.Table, accesstoken.Columns, sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID)) id, ok := atuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`models: missing "AccessToken.id" for update`)} @@ -285,6 +316,9 @@ func (atuo *AccessTokenUpdateOne) sqlSave(ctx context.Context) (_node *AccessTok } } } + if value, ok := atuo.mutation.Name(); ok { + _spec.SetField(accesstoken.FieldName, field.TypeString, value) + } if atuo.mutation.CreatedAtCleared() { _spec.ClearField(accesstoken.FieldCreatedAt, field.TypeTime) } diff --git a/packages/shared/pkg/models/chmodels/clickhouse.go b/packages/shared/pkg/models/chmodels/clickhouse.go new file mode 100644 index 0000000..bb09ed7 --- /dev/null +++ b/packages/shared/pkg/models/chmodels/clickhouse.go @@ -0,0 +1,9 @@ +package chmodels + +type ClickhouseColumn struct { + Database string `ch:"database"` + Table string `ch:"table"` + Name string `ch:"name"` + Type string `ch:"type"` + Position uint64 `ch:"position"` +} diff --git a/packages/shared/pkg/models/chmodels/metrics.go b/packages/shared/pkg/models/chmodels/metrics.go new file mode 100644 index 0000000..e2380b2 --- /dev/null +++ b/packages/shared/pkg/models/chmodels/metrics.go @@ -0,0 +1,13 @@ +package chmodels + +import "time" + +type Metrics struct { + Timestamp time.Time `ch:"timestamp"` + SandboxID string `ch:"sandbox_id"` + TeamID string `ch:"team_id"` + CPUCount uint32 `ch:"cpu_count"` + CPUUsedPercent float32 `ch:"cpu_used_pct"` + MemTotalMiB uint64 `ch:"mem_total_mib"` + MemUsedMiB uint64 
`ch:"mem_used_mib"` +} diff --git a/packages/shared/pkg/models/client.go b/packages/shared/pkg/models/client.go index ff59f3d..af7d725 100644 --- a/packages/shared/pkg/models/client.go +++ b/packages/shared/pkg/models/client.go @@ -17,6 +17,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" "github.com/e2b-dev/infra/packages/shared/pkg/models/env" "github.com/e2b-dev/infra/packages/shared/pkg/models/envalias" "github.com/e2b-dev/infra/packages/shared/pkg/models/envbuild" @@ -37,6 +38,8 @@ type Client struct { Schema *migrate.Schema // AccessToken is the client for interacting with the AccessToken builders. AccessToken *AccessTokenClient + // Cluster is the client for interacting with the Cluster builders. + Cluster *ClusterClient // Env is the client for interacting with the Env builders. Env *EnvClient // EnvAlias is the client for interacting with the EnvAlias builders. 
@@ -67,6 +70,7 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) c.AccessToken = NewAccessTokenClient(c.config) + c.Cluster = NewClusterClient(c.config) c.Env = NewEnvClient(c.config) c.EnvAlias = NewEnvAliasClient(c.config) c.EnvBuild = NewEnvBuildClient(c.config) @@ -172,6 +176,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { ctx: ctx, config: cfg, AccessToken: NewAccessTokenClient(cfg), + Cluster: NewClusterClient(cfg), Env: NewEnvClient(cfg), EnvAlias: NewEnvAliasClient(cfg), EnvBuild: NewEnvBuildClient(cfg), @@ -201,6 +206,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) ctx: ctx, config: cfg, AccessToken: NewAccessTokenClient(cfg), + Cluster: NewClusterClient(cfg), Env: NewEnvClient(cfg), EnvAlias: NewEnvAliasClient(cfg), EnvBuild: NewEnvBuildClient(cfg), @@ -239,8 +245,8 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.AccessToken, c.Env, c.EnvAlias, c.EnvBuild, c.Snapshot, c.Team, c.TeamAPIKey, - c.Tier, c.User, c.UsersTeams, + c.AccessToken, c.Cluster, c.Env, c.EnvAlias, c.EnvBuild, c.Snapshot, c.Team, + c.TeamAPIKey, c.Tier, c.User, c.UsersTeams, } { n.Use(hooks...) } @@ -250,8 +256,8 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.AccessToken, c.Env, c.EnvAlias, c.EnvBuild, c.Snapshot, c.Team, c.TeamAPIKey, - c.Tier, c.User, c.UsersTeams, + c.AccessToken, c.Cluster, c.Env, c.EnvAlias, c.EnvBuild, c.Snapshot, c.Team, + c.TeamAPIKey, c.Tier, c.User, c.UsersTeams, } { n.Intercept(interceptors...) 
} @@ -262,6 +268,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { switch m := m.(type) { case *AccessTokenMutation: return c.AccessToken.mutate(ctx, m) + case *ClusterMutation: + return c.Cluster.mutate(ctx, m) case *EnvMutation: return c.Env.mutate(ctx, m) case *EnvAliasMutation: @@ -346,7 +354,7 @@ func (c *AccessTokenClient) UpdateOne(at *AccessToken) *AccessTokenUpdateOne { } // UpdateOneID returns an update builder for the given id. -func (c *AccessTokenClient) UpdateOneID(id string) *AccessTokenUpdateOne { +func (c *AccessTokenClient) UpdateOneID(id uuid.UUID) *AccessTokenUpdateOne { mutation := newAccessTokenMutation(c.config, OpUpdateOne, withAccessTokenID(id)) return &AccessTokenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} } @@ -363,7 +371,7 @@ func (c *AccessTokenClient) DeleteOne(at *AccessToken) *AccessTokenDeleteOne { } // DeleteOneID returns a builder for deleting the given entity by its id. -func (c *AccessTokenClient) DeleteOneID(id string) *AccessTokenDeleteOne { +func (c *AccessTokenClient) DeleteOneID(id uuid.UUID) *AccessTokenDeleteOne { builder := c.Delete().Where(accesstoken.ID(id)) builder.mutation.id = &id builder.mutation.op = OpDeleteOne @@ -380,12 +388,12 @@ func (c *AccessTokenClient) Query() *AccessTokenQuery { } // Get returns a AccessToken entity by its id. -func (c *AccessTokenClient) Get(ctx context.Context, id string) (*AccessToken, error) { +func (c *AccessTokenClient) Get(ctx context.Context, id uuid.UUID) (*AccessToken, error) { return c.Query().Where(accesstoken.ID(id)).Only(ctx) } // GetX is like Get, but panics if an error occurs. 
-func (c *AccessTokenClient) GetX(ctx context.Context, id string) *AccessToken { +func (c *AccessTokenClient) GetX(ctx context.Context, id uuid.UUID) *AccessToken { obj, err := c.Get(ctx, id) if err != nil { panic(err) @@ -437,6 +445,139 @@ func (c *AccessTokenClient) mutate(ctx context.Context, m *AccessTokenMutation) } } +// ClusterClient is a client for the Cluster schema. +type ClusterClient struct { + config +} + +// NewClusterClient returns a client for the Cluster from the given config. +func NewClusterClient(c config) *ClusterClient { + return &ClusterClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `cluster.Hooks(f(g(h())))`. +func (c *ClusterClient) Use(hooks ...Hook) { + c.hooks.Cluster = append(c.hooks.Cluster, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `cluster.Intercept(f(g(h())))`. +func (c *ClusterClient) Intercept(interceptors ...Interceptor) { + c.inters.Cluster = append(c.inters.Cluster, interceptors...) +} + +// Create returns a builder for creating a Cluster entity. +func (c *ClusterClient) Create() *ClusterCreate { + mutation := newClusterMutation(c.config, OpCreate) + return &ClusterCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Cluster entities. +func (c *ClusterClient) CreateBulk(builders ...*ClusterCreate) *ClusterCreateBulk { + return &ClusterCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ClusterClient) MapCreateBulk(slice any, setFunc func(*ClusterCreate, int)) *ClusterCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ClusterCreateBulk{err: fmt.Errorf("calling to ClusterClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ClusterCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ClusterCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Cluster. +func (c *ClusterClient) Update() *ClusterUpdate { + mutation := newClusterMutation(c.config, OpUpdate) + return &ClusterUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ClusterClient) UpdateOne(cl *Cluster) *ClusterUpdateOne { + mutation := newClusterMutation(c.config, OpUpdateOne, withCluster(cl)) + return &ClusterUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ClusterClient) UpdateOneID(id uuid.UUID) *ClusterUpdateOne { + mutation := newClusterMutation(c.config, OpUpdateOne, withClusterID(id)) + return &ClusterUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Cluster. +func (c *ClusterClient) Delete() *ClusterDelete { + mutation := newClusterMutation(c.config, OpDelete) + return &ClusterDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ClusterClient) DeleteOne(cl *Cluster) *ClusterDeleteOne { + return c.DeleteOneID(cl.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. 
+func (c *ClusterClient) DeleteOneID(id uuid.UUID) *ClusterDeleteOne { + builder := c.Delete().Where(cluster.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ClusterDeleteOne{builder} +} + +// Query returns a query builder for Cluster. +func (c *ClusterClient) Query() *ClusterQuery { + return &ClusterQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeCluster}, + inters: c.Interceptors(), + } +} + +// Get returns a Cluster entity by its id. +func (c *ClusterClient) Get(ctx context.Context, id uuid.UUID) (*Cluster, error) { + return c.Query().Where(cluster.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ClusterClient) GetX(ctx context.Context, id uuid.UUID) *Cluster { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *ClusterClient) Hooks() []Hook { + return c.hooks.Cluster +} + +// Interceptors returns the client interceptors. +func (c *ClusterClient) Interceptors() []Interceptor { + return c.inters.Cluster +} + +func (c *ClusterClient) mutate(ctx context.Context, m *ClusterMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ClusterCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ClusterUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ClusterUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ClusterDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("models: unknown Cluster mutation op: %q", m.Op()) + } +} + // EnvClient is a client for the Env schema. type EnvClient struct { config @@ -2074,12 +2215,12 @@ func (c *UsersTeamsClient) mutate(ctx context.Context, m *UsersTeamsMutation) (V // hooks and interceptors per client, for fast access. 
type ( hooks struct { - AccessToken, Env, EnvAlias, EnvBuild, Snapshot, Team, TeamAPIKey, Tier, User, - UsersTeams []ent.Hook + AccessToken, Cluster, Env, EnvAlias, EnvBuild, Snapshot, Team, TeamAPIKey, Tier, + User, UsersTeams []ent.Hook } inters struct { - AccessToken, Env, EnvAlias, EnvBuild, Snapshot, Team, TeamAPIKey, Tier, User, - UsersTeams []ent.Interceptor + AccessToken, Cluster, Env, EnvAlias, EnvBuild, Snapshot, Team, TeamAPIKey, Tier, + User, UsersTeams []ent.Interceptor } ) @@ -2087,6 +2228,7 @@ var ( // DefaultSchemaConfig represents the default schema names for all tables as defined in ent/schema. DefaultSchemaConfig = SchemaConfig{ AccessToken: tableSchemas[1], + Cluster: tableSchemas[1], Env: tableSchemas[1], EnvAlias: tableSchemas[1], EnvBuild: tableSchemas[1], diff --git a/packages/shared/pkg/models/cluster.go b/packages/shared/pkg/models/cluster.go new file mode 100644 index 0000000..6acdd02 --- /dev/null +++ b/packages/shared/pkg/models/cluster.go @@ -0,0 +1,127 @@ +// Code generated by ent, DO NOT EDIT. + +package models + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" + "github.com/google/uuid" +) + +// Cluster is the model entity for the Cluster schema. +type Cluster struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // Endpoint holds the value of the "endpoint" field. + Endpoint string `json:"endpoint,omitempty"` + // EndpointTLS holds the value of the "endpoint_tls" field. + EndpointTLS bool `json:"endpoint_tls,omitempty"` + // Token holds the value of the "token" field. + Token string `json:"-"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Cluster) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case cluster.FieldEndpointTLS: + values[i] = new(sql.NullBool) + case cluster.FieldEndpoint, cluster.FieldToken: + values[i] = new(sql.NullString) + case cluster.FieldID: + values[i] = new(uuid.UUID) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Cluster fields. +func (c *Cluster) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case cluster.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + c.ID = *value + } + case cluster.FieldEndpoint: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field endpoint", values[i]) + } else if value.Valid { + c.Endpoint = value.String + } + case cluster.FieldEndpointTLS: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field endpoint_tls", values[i]) + } else if value.Valid { + c.EndpointTLS = value.Bool + } + case cluster.FieldToken: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field token", values[i]) + } else if value.Valid { + c.Token = value.String + } + default: + c.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Cluster. +// This includes values selected through modifiers, order, etc. +func (c *Cluster) Value(name string) (ent.Value, error) { + return c.selectValues.Get(name) +} + +// Update returns a builder for updating this Cluster. 
+// Note that you need to call Cluster.Unwrap() before calling this method if this Cluster +// was returned from a transaction, and the transaction was committed or rolled back. +func (c *Cluster) Update() *ClusterUpdateOne { + return NewClusterClient(c.config).UpdateOne(c) +} + +// Unwrap unwraps the Cluster entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (c *Cluster) Unwrap() *Cluster { + _tx, ok := c.config.driver.(*txDriver) + if !ok { + panic("models: Cluster is not a transactional entity") + } + c.config.driver = _tx.drv + return c +} + +// String implements the fmt.Stringer. +func (c *Cluster) String() string { + var builder strings.Builder + builder.WriteString("Cluster(") + builder.WriteString(fmt.Sprintf("id=%v, ", c.ID)) + builder.WriteString("endpoint=") + builder.WriteString(c.Endpoint) + builder.WriteString(", ") + builder.WriteString("endpoint_tls=") + builder.WriteString(fmt.Sprintf("%v", c.EndpointTLS)) + builder.WriteString(", ") + builder.WriteString("token=") + builder.WriteByte(')') + return builder.String() +} + +// Clusters is a parsable slice of Cluster. +type Clusters []*Cluster diff --git a/packages/shared/pkg/models/cluster/cluster.go b/packages/shared/pkg/models/cluster/cluster.go new file mode 100644 index 0000000..06a3ba0 --- /dev/null +++ b/packages/shared/pkg/models/cluster/cluster.go @@ -0,0 +1,72 @@ +// Code generated by ent, DO NOT EDIT. + +package cluster + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the cluster type in the database. + Label = "cluster" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldEndpoint holds the string denoting the endpoint field in the database. + FieldEndpoint = "endpoint" + // FieldEndpointTLS holds the string denoting the endpoint_tls field in the database. 
+ FieldEndpointTLS = "endpoint_tls" + // FieldToken holds the string denoting the token field in the database. + FieldToken = "token" + // Table holds the table name of the cluster in the database. + Table = "clusters" +) + +// Columns holds all SQL columns for cluster fields. +var Columns = []string{ + FieldID, + FieldEndpoint, + FieldEndpointTLS, + FieldToken, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save. + EndpointValidator func(string) error + // DefaultEndpointTLS holds the default value on creation for the "endpoint_tls" field. + DefaultEndpointTLS bool + // TokenValidator is a validator for the "token" field. It is called by the builders before save. + TokenValidator func(string) error +) + +// OrderOption defines the ordering options for the Cluster queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByEndpoint orders the results by the endpoint field. +func ByEndpoint(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndpoint, opts...).ToFunc() +} + +// ByEndpointTLS orders the results by the endpoint_tls field. +func ByEndpointTLS(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndpointTLS, opts...).ToFunc() +} + +// ByToken orders the results by the token field. 
+func ByToken(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldToken, opts...).ToFunc() +} diff --git a/packages/shared/pkg/models/cluster/where.go b/packages/shared/pkg/models/cluster/where.go new file mode 100644 index 0000000..0930f58 --- /dev/null +++ b/packages/shared/pkg/models/cluster/where.go @@ -0,0 +1,224 @@ +// Code generated by ent, DO NOT EDIT. + +package cluster + +import ( + "entgo.io/ent/dialect/sql" + "github.com/e2b-dev/infra/packages/shared/pkg/models/predicate" + "github.com/google/uuid" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id uuid.UUID) predicate.Cluster { + return predicate.Cluster(sql.FieldLTE(FieldID, id)) +} + +// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ. +func Endpoint(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldEndpoint, v)) +} + +// EndpointTLS applies equality check predicate on the "endpoint_tls" field. It's identical to EndpointTLSEQ. +func EndpointTLS(v bool) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldEndpointTLS, v)) +} + +// Token applies equality check predicate on the "token" field. It's identical to TokenEQ. +func Token(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldToken, v)) +} + +// EndpointEQ applies the EQ predicate on the "endpoint" field. +func EndpointEQ(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldEndpoint, v)) +} + +// EndpointNEQ applies the NEQ predicate on the "endpoint" field. +func EndpointNEQ(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldNEQ(FieldEndpoint, v)) +} + +// EndpointIn applies the In predicate on the "endpoint" field. +func EndpointIn(vs ...string) predicate.Cluster { + return predicate.Cluster(sql.FieldIn(FieldEndpoint, vs...)) +} + +// EndpointNotIn applies the NotIn predicate on the "endpoint" field. +func EndpointNotIn(vs ...string) predicate.Cluster { + return predicate.Cluster(sql.FieldNotIn(FieldEndpoint, vs...)) +} + +// EndpointGT applies the GT predicate on the "endpoint" field. +func EndpointGT(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldGT(FieldEndpoint, v)) +} + +// EndpointGTE applies the GTE predicate on the "endpoint" field. +func EndpointGTE(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldGTE(FieldEndpoint, v)) +} + +// EndpointLT applies the LT predicate on the "endpoint" field. 
+func EndpointLT(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldLT(FieldEndpoint, v)) +} + +// EndpointLTE applies the LTE predicate on the "endpoint" field. +func EndpointLTE(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldLTE(FieldEndpoint, v)) +} + +// EndpointContains applies the Contains predicate on the "endpoint" field. +func EndpointContains(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldContains(FieldEndpoint, v)) +} + +// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field. +func EndpointHasPrefix(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldHasPrefix(FieldEndpoint, v)) +} + +// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field. +func EndpointHasSuffix(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldHasSuffix(FieldEndpoint, v)) +} + +// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field. +func EndpointEqualFold(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEqualFold(FieldEndpoint, v)) +} + +// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field. +func EndpointContainsFold(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldContainsFold(FieldEndpoint, v)) +} + +// EndpointTLSEQ applies the EQ predicate on the "endpoint_tls" field. +func EndpointTLSEQ(v bool) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldEndpointTLS, v)) +} + +// EndpointTLSNEQ applies the NEQ predicate on the "endpoint_tls" field. +func EndpointTLSNEQ(v bool) predicate.Cluster { + return predicate.Cluster(sql.FieldNEQ(FieldEndpointTLS, v)) +} + +// TokenEQ applies the EQ predicate on the "token" field. +func TokenEQ(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEQ(FieldToken, v)) +} + +// TokenNEQ applies the NEQ predicate on the "token" field. 
+func TokenNEQ(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldNEQ(FieldToken, v)) +} + +// TokenIn applies the In predicate on the "token" field. +func TokenIn(vs ...string) predicate.Cluster { + return predicate.Cluster(sql.FieldIn(FieldToken, vs...)) +} + +// TokenNotIn applies the NotIn predicate on the "token" field. +func TokenNotIn(vs ...string) predicate.Cluster { + return predicate.Cluster(sql.FieldNotIn(FieldToken, vs...)) +} + +// TokenGT applies the GT predicate on the "token" field. +func TokenGT(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldGT(FieldToken, v)) +} + +// TokenGTE applies the GTE predicate on the "token" field. +func TokenGTE(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldGTE(FieldToken, v)) +} + +// TokenLT applies the LT predicate on the "token" field. +func TokenLT(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldLT(FieldToken, v)) +} + +// TokenLTE applies the LTE predicate on the "token" field. +func TokenLTE(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldLTE(FieldToken, v)) +} + +// TokenContains applies the Contains predicate on the "token" field. +func TokenContains(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldContains(FieldToken, v)) +} + +// TokenHasPrefix applies the HasPrefix predicate on the "token" field. +func TokenHasPrefix(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldHasPrefix(FieldToken, v)) +} + +// TokenHasSuffix applies the HasSuffix predicate on the "token" field. +func TokenHasSuffix(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldHasSuffix(FieldToken, v)) +} + +// TokenEqualFold applies the EqualFold predicate on the "token" field. +func TokenEqualFold(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldEqualFold(FieldToken, v)) +} + +// TokenContainsFold applies the ContainsFold predicate on the "token" field. 
+func TokenContainsFold(v string) predicate.Cluster { + return predicate.Cluster(sql.FieldContainsFold(FieldToken, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Cluster) predicate.Cluster { + return predicate.Cluster(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Cluster) predicate.Cluster { + return predicate.Cluster(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Cluster) predicate.Cluster { + return predicate.Cluster(sql.NotPredicates(p)) +} diff --git a/packages/shared/pkg/models/cluster_create.go b/packages/shared/pkg/models/cluster_create.go new file mode 100644 index 0000000..9d5495b --- /dev/null +++ b/packages/shared/pkg/models/cluster_create.go @@ -0,0 +1,625 @@ +// Code generated by ent, DO NOT EDIT. + +package models + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" + "github.com/google/uuid" +) + +// ClusterCreate is the builder for creating a Cluster entity. +type ClusterCreate struct { + config + mutation *ClusterMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetEndpoint sets the "endpoint" field. +func (cc *ClusterCreate) SetEndpoint(s string) *ClusterCreate { + cc.mutation.SetEndpoint(s) + return cc +} + +// SetEndpointTLS sets the "endpoint_tls" field. +func (cc *ClusterCreate) SetEndpointTLS(b bool) *ClusterCreate { + cc.mutation.SetEndpointTLS(b) + return cc +} + +// SetNillableEndpointTLS sets the "endpoint_tls" field if the given value is not nil. +func (cc *ClusterCreate) SetNillableEndpointTLS(b *bool) *ClusterCreate { + if b != nil { + cc.SetEndpointTLS(*b) + } + return cc +} + +// SetToken sets the "token" field. 
+func (cc *ClusterCreate) SetToken(s string) *ClusterCreate { + cc.mutation.SetToken(s) + return cc +} + +// SetID sets the "id" field. +func (cc *ClusterCreate) SetID(u uuid.UUID) *ClusterCreate { + cc.mutation.SetID(u) + return cc +} + +// Mutation returns the ClusterMutation object of the builder. +func (cc *ClusterCreate) Mutation() *ClusterMutation { + return cc.mutation +} + +// Save creates the Cluster in the database. +func (cc *ClusterCreate) Save(ctx context.Context) (*Cluster, error) { + cc.defaults() + return withHooks(ctx, cc.sqlSave, cc.mutation, cc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (cc *ClusterCreate) SaveX(ctx context.Context) *Cluster { + v, err := cc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (cc *ClusterCreate) Exec(ctx context.Context) error { + _, err := cc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cc *ClusterCreate) ExecX(ctx context.Context) { + if err := cc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (cc *ClusterCreate) defaults() { + if _, ok := cc.mutation.EndpointTLS(); !ok { + v := cluster.DefaultEndpointTLS + cc.mutation.SetEndpointTLS(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (cc *ClusterCreate) check() error { + if _, ok := cc.mutation.Endpoint(); !ok { + return &ValidationError{Name: "endpoint", err: errors.New(`models: missing required field "Cluster.endpoint"`)} + } + if v, ok := cc.mutation.Endpoint(); ok { + if err := cluster.EndpointValidator(v); err != nil { + return &ValidationError{Name: "endpoint", err: fmt.Errorf(`models: validator failed for field "Cluster.endpoint": %w`, err)} + } + } + if _, ok := cc.mutation.EndpointTLS(); !ok { + return &ValidationError{Name: "endpoint_tls", err: errors.New(`models: missing required field "Cluster.endpoint_tls"`)} + } + if _, ok := cc.mutation.Token(); !ok { + return &ValidationError{Name: "token", err: errors.New(`models: missing required field "Cluster.token"`)} + } + if v, ok := cc.mutation.Token(); ok { + if err := cluster.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`models: validator failed for field "Cluster.token": %w`, err)} + } + } + return nil +} + +func (cc *ClusterCreate) sqlSave(ctx context.Context) (*Cluster, error) { + if err := cc.check(); err != nil { + return nil, err + } + _node, _spec := cc.createSpec() + if err := sqlgraph.CreateNode(ctx, cc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + cc.mutation.id = &_node.ID + cc.mutation.done = true + return _node, nil +} + +func (cc *ClusterCreate) createSpec() (*Cluster, *sqlgraph.CreateSpec) { + var ( + _node = &Cluster{config: cc.config} + _spec = sqlgraph.NewCreateSpec(cluster.Table, sqlgraph.NewFieldSpec(cluster.FieldID, field.TypeUUID)) + ) + _spec.Schema = cc.schemaConfig.Cluster + _spec.OnConflict = cc.conflict + if id, ok := cc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = 
&id + } + if value, ok := cc.mutation.Endpoint(); ok { + _spec.SetField(cluster.FieldEndpoint, field.TypeString, value) + _node.Endpoint = value + } + if value, ok := cc.mutation.EndpointTLS(); ok { + _spec.SetField(cluster.FieldEndpointTLS, field.TypeBool, value) + _node.EndpointTLS = value + } + if value, ok := cc.mutation.Token(); ok { + _spec.SetField(cluster.FieldToken, field.TypeString, value) + _node.Token = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Cluster.Create(). +// SetEndpoint(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ClusterUpsert) { +// SetEndpoint(v+v). +// }). +// Exec(ctx) +func (cc *ClusterCreate) OnConflict(opts ...sql.ConflictOption) *ClusterUpsertOne { + cc.conflict = opts + return &ClusterUpsertOne{ + create: cc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (cc *ClusterCreate) OnConflictColumns(columns ...string) *ClusterUpsertOne { + cc.conflict = append(cc.conflict, sql.ConflictColumns(columns...)) + return &ClusterUpsertOne{ + create: cc, + } +} + +type ( + // ClusterUpsertOne is the builder for "upsert"-ing + // one Cluster node. + ClusterUpsertOne struct { + create *ClusterCreate + } + + // ClusterUpsert is the "OnConflict" setter. + ClusterUpsert struct { + *sql.UpdateSet + } +) + +// SetEndpoint sets the "endpoint" field. +func (u *ClusterUpsert) SetEndpoint(v string) *ClusterUpsert { + u.Set(cluster.FieldEndpoint, v) + return u +} + +// UpdateEndpoint sets the "endpoint" field to the value that was provided on create. 
+func (u *ClusterUpsert) UpdateEndpoint() *ClusterUpsert { + u.SetExcluded(cluster.FieldEndpoint) + return u +} + +// SetEndpointTLS sets the "endpoint_tls" field. +func (u *ClusterUpsert) SetEndpointTLS(v bool) *ClusterUpsert { + u.Set(cluster.FieldEndpointTLS, v) + return u +} + +// UpdateEndpointTLS sets the "endpoint_tls" field to the value that was provided on create. +func (u *ClusterUpsert) UpdateEndpointTLS() *ClusterUpsert { + u.SetExcluded(cluster.FieldEndpointTLS) + return u +} + +// SetToken sets the "token" field. +func (u *ClusterUpsert) SetToken(v string) *ClusterUpsert { + u.Set(cluster.FieldToken, v) + return u +} + +// UpdateToken sets the "token" field to the value that was provided on create. +func (u *ClusterUpsert) UpdateToken() *ClusterUpsert { + u.SetExcluded(cluster.FieldToken) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(cluster.FieldID) +// }), +// ). +// Exec(ctx) +func (u *ClusterUpsertOne) UpdateNewValues() *ClusterUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(cluster.FieldID) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ClusterUpsertOne) Ignore() *ClusterUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *ClusterUpsertOne) DoNothing() *ClusterUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ClusterCreate.OnConflict +// documentation for more info. +func (u *ClusterUpsertOne) Update(set func(*ClusterUpsert)) *ClusterUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ClusterUpsert{UpdateSet: update}) + })) + return u +} + +// SetEndpoint sets the "endpoint" field. +func (u *ClusterUpsertOne) SetEndpoint(v string) *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.SetEndpoint(v) + }) +} + +// UpdateEndpoint sets the "endpoint" field to the value that was provided on create. +func (u *ClusterUpsertOne) UpdateEndpoint() *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.UpdateEndpoint() + }) +} + +// SetEndpointTLS sets the "endpoint_tls" field. +func (u *ClusterUpsertOne) SetEndpointTLS(v bool) *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.SetEndpointTLS(v) + }) +} + +// UpdateEndpointTLS sets the "endpoint_tls" field to the value that was provided on create. +func (u *ClusterUpsertOne) UpdateEndpointTLS() *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.UpdateEndpointTLS() + }) +} + +// SetToken sets the "token" field. +func (u *ClusterUpsertOne) SetToken(v string) *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.SetToken(v) + }) +} + +// UpdateToken sets the "token" field to the value that was provided on create. +func (u *ClusterUpsertOne) UpdateToken() *ClusterUpsertOne { + return u.Update(func(s *ClusterUpsert) { + s.UpdateToken() + }) +} + +// Exec executes the query. 
+func (u *ClusterUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("models: missing options for ClusterCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ClusterUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ClusterUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("models: ClusterUpsertOne.ID is not supported by MySQL driver. Use ClusterUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ClusterUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ClusterCreateBulk is the builder for creating many Cluster entities in bulk. +type ClusterCreateBulk struct { + config + err error + builders []*ClusterCreate + conflict []sql.ConflictOption +} + +// Save creates the Cluster entities in the database. 
+func (ccb *ClusterCreateBulk) Save(ctx context.Context) ([]*Cluster, error) { + if ccb.err != nil { + return nil, ccb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ccb.builders)) + nodes := make([]*Cluster, len(ccb.builders)) + mutators := make([]Mutator, len(ccb.builders)) + for i := range ccb.builders { + func(i int, root context.Context) { + builder := ccb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ClusterMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ccb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ccb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ccb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ccb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ccb *ClusterCreateBulk) SaveX(ctx context.Context) []*Cluster { + v, err := ccb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ccb *ClusterCreateBulk) Exec(ctx context.Context) error { + _, err := ccb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ccb *ClusterCreateBulk) ExecX(ctx context.Context) { + if err := ccb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Cluster.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ClusterUpsert) { +// SetEndpoint(v+v). +// }). +// Exec(ctx) +func (ccb *ClusterCreateBulk) OnConflict(opts ...sql.ConflictOption) *ClusterUpsertBulk { + ccb.conflict = opts + return &ClusterUpsertBulk{ + create: ccb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ccb *ClusterCreateBulk) OnConflictColumns(columns ...string) *ClusterUpsertBulk { + ccb.conflict = append(ccb.conflict, sql.ConflictColumns(columns...)) + return &ClusterUpsertBulk{ + create: ccb, + } +} + +// ClusterUpsertBulk is the builder for "upsert"-ing +// a bulk of Cluster nodes. +type ClusterUpsertBulk struct { + create *ClusterCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(cluster.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *ClusterUpsertBulk) UpdateNewValues() *ClusterUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(cluster.FieldID) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Cluster.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ClusterUpsertBulk) Ignore() *ClusterUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ClusterUpsertBulk) DoNothing() *ClusterUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ClusterCreateBulk.OnConflict +// documentation for more info. +func (u *ClusterUpsertBulk) Update(set func(*ClusterUpsert)) *ClusterUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ClusterUpsert{UpdateSet: update}) + })) + return u +} + +// SetEndpoint sets the "endpoint" field. +func (u *ClusterUpsertBulk) SetEndpoint(v string) *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.SetEndpoint(v) + }) +} + +// UpdateEndpoint sets the "endpoint" field to the value that was provided on create. +func (u *ClusterUpsertBulk) UpdateEndpoint() *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.UpdateEndpoint() + }) +} + +// SetEndpointTLS sets the "endpoint_tls" field. 
+func (u *ClusterUpsertBulk) SetEndpointTLS(v bool) *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.SetEndpointTLS(v) + }) +} + +// UpdateEndpointTLS sets the "endpoint_tls" field to the value that was provided on create. +func (u *ClusterUpsertBulk) UpdateEndpointTLS() *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.UpdateEndpointTLS() + }) +} + +// SetToken sets the "token" field. +func (u *ClusterUpsertBulk) SetToken(v string) *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.SetToken(v) + }) +} + +// UpdateToken sets the "token" field to the value that was provided on create. +func (u *ClusterUpsertBulk) UpdateToken() *ClusterUpsertBulk { + return u.Update(func(s *ClusterUpsert) { + s.UpdateToken() + }) +} + +// Exec executes the query. +func (u *ClusterUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("models: OnConflict was set for builder %d. Set it on the ClusterCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("models: missing options for ClusterCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ClusterUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/packages/shared/pkg/models/cluster_delete.go b/packages/shared/pkg/models/cluster_delete.go new file mode 100644 index 0000000..a2ae974 --- /dev/null +++ b/packages/shared/pkg/models/cluster_delete.go @@ -0,0 +1,91 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package models + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" + "github.com/e2b-dev/infra/packages/shared/pkg/models/internal" + "github.com/e2b-dev/infra/packages/shared/pkg/models/predicate" +) + +// ClusterDelete is the builder for deleting a Cluster entity. +type ClusterDelete struct { + config + hooks []Hook + mutation *ClusterMutation +} + +// Where appends a list predicates to the ClusterDelete builder. +func (cd *ClusterDelete) Where(ps ...predicate.Cluster) *ClusterDelete { + cd.mutation.Where(ps...) + return cd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (cd *ClusterDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, cd.sqlExec, cd.mutation, cd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (cd *ClusterDelete) ExecX(ctx context.Context) int { + n, err := cd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (cd *ClusterDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(cluster.Table, sqlgraph.NewFieldSpec(cluster.FieldID, field.TypeUUID)) + _spec.Node.Schema = cd.schemaConfig.Cluster + ctx = internal.NewSchemaConfigContext(ctx, cd.schemaConfig) + if ps := cd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, cd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + cd.mutation.done = true + return affected, err +} + +// ClusterDeleteOne is the builder for deleting a single Cluster entity. +type ClusterDeleteOne struct { + cd *ClusterDelete +} + +// Where appends a list predicates to the ClusterDelete builder. 
+func (cdo *ClusterDeleteOne) Where(ps ...predicate.Cluster) *ClusterDeleteOne { + cdo.cd.mutation.Where(ps...) + return cdo +} + +// Exec executes the deletion query. +func (cdo *ClusterDeleteOne) Exec(ctx context.Context) error { + n, err := cdo.cd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{cluster.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (cdo *ClusterDeleteOne) ExecX(ctx context.Context) { + if err := cdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/packages/shared/pkg/models/cluster_query.go b/packages/shared/pkg/models/cluster_query.go new file mode 100644 index 0000000..28cf677 --- /dev/null +++ b/packages/shared/pkg/models/cluster_query.go @@ -0,0 +1,557 @@ +// Code generated by ent, DO NOT EDIT. + +package models + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" + "github.com/e2b-dev/infra/packages/shared/pkg/models/internal" + "github.com/e2b-dev/infra/packages/shared/pkg/models/predicate" + "github.com/google/uuid" +) + +// ClusterQuery is the builder for querying Cluster entities. +type ClusterQuery struct { + config + ctx *QueryContext + order []cluster.OrderOption + inters []Interceptor + predicates []predicate.Cluster + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ClusterQuery builder. +func (cq *ClusterQuery) Where(ps ...predicate.Cluster) *ClusterQuery { + cq.predicates = append(cq.predicates, ps...) + return cq +} + +// Limit the number of records to be returned by this query. +func (cq *ClusterQuery) Limit(limit int) *ClusterQuery { + cq.ctx.Limit = &limit + return cq +} + +// Offset to start from. 
+func (cq *ClusterQuery) Offset(offset int) *ClusterQuery { + cq.ctx.Offset = &offset + return cq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (cq *ClusterQuery) Unique(unique bool) *ClusterQuery { + cq.ctx.Unique = &unique + return cq +} + +// Order specifies how the records should be ordered. +func (cq *ClusterQuery) Order(o ...cluster.OrderOption) *ClusterQuery { + cq.order = append(cq.order, o...) + return cq +} + +// First returns the first Cluster entity from the query. +// Returns a *NotFoundError when no Cluster was found. +func (cq *ClusterQuery) First(ctx context.Context) (*Cluster, error) { + nodes, err := cq.Limit(1).All(setContextOp(ctx, cq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{cluster.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (cq *ClusterQuery) FirstX(ctx context.Context) *Cluster { + node, err := cq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Cluster ID from the query. +// Returns a *NotFoundError when no Cluster ID was found. +func (cq *ClusterQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = cq.Limit(1).IDs(setContextOp(ctx, cq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{cluster.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (cq *ClusterQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := cq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Cluster entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Cluster entity is found. 
+// Returns a *NotFoundError when no Cluster entities are found. +func (cq *ClusterQuery) Only(ctx context.Context) (*Cluster, error) { + nodes, err := cq.Limit(2).All(setContextOp(ctx, cq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{cluster.Label} + default: + return nil, &NotSingularError{cluster.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (cq *ClusterQuery) OnlyX(ctx context.Context) *Cluster { + node, err := cq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Cluster ID in the query. +// Returns a *NotSingularError when more than one Cluster ID is found. +// Returns a *NotFoundError when no entities are found. +func (cq *ClusterQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = cq.Limit(2).IDs(setContextOp(ctx, cq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{cluster.Label} + default: + err = &NotSingularError{cluster.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (cq *ClusterQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := cq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Clusters. +func (cq *ClusterQuery) All(ctx context.Context) ([]*Cluster, error) { + ctx = setContextOp(ctx, cq.ctx, "All") + if err := cq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Cluster, *ClusterQuery]() + return withInterceptors[[]*Cluster](ctx, cq, qr, cq.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (cq *ClusterQuery) AllX(ctx context.Context) []*Cluster { + nodes, err := cq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Cluster IDs. +func (cq *ClusterQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if cq.ctx.Unique == nil && cq.path != nil { + cq.Unique(true) + } + ctx = setContextOp(ctx, cq.ctx, "IDs") + if err = cq.Select(cluster.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (cq *ClusterQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := cq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (cq *ClusterQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, cq.ctx, "Count") + if err := cq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, cq, querierCount[*ClusterQuery](), cq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (cq *ClusterQuery) CountX(ctx context.Context) int { + count, err := cq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (cq *ClusterQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, cq.ctx, "Exist") + switch _, err := cq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("models: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (cq *ClusterQuery) ExistX(ctx context.Context) bool { + exist, err := cq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ClusterQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (cq *ClusterQuery) Clone() *ClusterQuery { + if cq == nil { + return nil + } + return &ClusterQuery{ + config: cq.config, + ctx: cq.ctx.Clone(), + order: append([]cluster.OrderOption{}, cq.order...), + inters: append([]Interceptor{}, cq.inters...), + predicates: append([]predicate.Cluster{}, cq.predicates...), + // clone intermediate query. + sql: cq.sql.Clone(), + path: cq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Endpoint string `json:"endpoint,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Cluster.Query(). +// GroupBy(cluster.FieldEndpoint). +// Aggregate(models.Count()). +// Scan(ctx, &v) +func (cq *ClusterQuery) GroupBy(field string, fields ...string) *ClusterGroupBy { + cq.ctx.Fields = append([]string{field}, fields...) + grbuild := &ClusterGroupBy{build: cq} + grbuild.flds = &cq.ctx.Fields + grbuild.label = cluster.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Endpoint string `json:"endpoint,omitempty"` +// } +// +// client.Cluster.Query(). +// Select(cluster.FieldEndpoint). +// Scan(ctx, &v) +func (cq *ClusterQuery) Select(fields ...string) *ClusterSelect { + cq.ctx.Fields = append(cq.ctx.Fields, fields...) + sbuild := &ClusterSelect{ClusterQuery: cq} + sbuild.label = cluster.Label + sbuild.flds, sbuild.scan = &cq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ClusterSelect configured with the given aggregations. +func (cq *ClusterQuery) Aggregate(fns ...AggregateFunc) *ClusterSelect { + return cq.Select().Aggregate(fns...) 
+} + +func (cq *ClusterQuery) prepareQuery(ctx context.Context) error { + for _, inter := range cq.inters { + if inter == nil { + return fmt.Errorf("models: uninitialized interceptor (forgotten import models/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, cq); err != nil { + return err + } + } + } + for _, f := range cq.ctx.Fields { + if !cluster.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("models: invalid field %q for query", f)} + } + } + if cq.path != nil { + prev, err := cq.path(ctx) + if err != nil { + return err + } + cq.sql = prev + } + return nil +} + +func (cq *ClusterQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Cluster, error) { + var ( + nodes = []*Cluster{} + _spec = cq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Cluster).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Cluster{config: cq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + _spec.Node.Schema = cq.schemaConfig.Cluster + ctx = internal.NewSchemaConfigContext(ctx, cq.schemaConfig) + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, cq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (cq *ClusterQuery) sqlCount(ctx context.Context) (int, error) { + _spec := cq.querySpec() + _spec.Node.Schema = cq.schemaConfig.Cluster + ctx = internal.NewSchemaConfigContext(ctx, cq.schemaConfig) + if len(cq.modifiers) > 0 { + _spec.Modifiers = cq.modifiers + } + _spec.Node.Columns = cq.ctx.Fields + if len(cq.ctx.Fields) > 0 { + _spec.Unique = cq.ctx.Unique != nil && *cq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, cq.driver, _spec) +} + +func (cq *ClusterQuery) querySpec() *sqlgraph.QuerySpec { + _spec := 
sqlgraph.NewQuerySpec(cluster.Table, cluster.Columns, sqlgraph.NewFieldSpec(cluster.FieldID, field.TypeUUID)) + _spec.From = cq.sql + if unique := cq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if cq.path != nil { + _spec.Unique = true + } + if fields := cq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, cluster.FieldID) + for i := range fields { + if fields[i] != cluster.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := cq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := cq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := cq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := cq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (cq *ClusterQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(cq.driver.Dialect()) + t1 := builder.Table(cluster.Table) + columns := cq.ctx.Fields + if len(columns) == 0 { + columns = cluster.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if cq.sql != nil { + selector = cq.sql + selector.Select(selector.Columns(columns...)...) + } + if cq.ctx.Unique != nil && *cq.ctx.Unique { + selector.Distinct() + } + t1.Schema(cq.schemaConfig.Cluster) + ctx = internal.NewSchemaConfigContext(ctx, cq.schemaConfig) + selector.WithContext(ctx) + for _, m := range cq.modifiers { + m(selector) + } + for _, p := range cq.predicates { + p(selector) + } + for _, p := range cq.order { + p(selector) + } + if offset := cq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := cq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// Modify adds a query modifier for attaching custom logic to queries. +func (cq *ClusterQuery) Modify(modifiers ...func(s *sql.Selector)) *ClusterSelect { + cq.modifiers = append(cq.modifiers, modifiers...) + return cq.Select() +} + +// ClusterGroupBy is the group-by builder for Cluster entities. +type ClusterGroupBy struct { + selector + build *ClusterQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (cgb *ClusterGroupBy) Aggregate(fns ...AggregateFunc) *ClusterGroupBy { + cgb.fns = append(cgb.fns, fns...) + return cgb +} + +// Scan applies the selector query and scans the result into the given value. +func (cgb *ClusterGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cgb.build.ctx, "GroupBy") + if err := cgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ClusterQuery, *ClusterGroupBy](ctx, cgb.build, cgb, cgb.build.inters, v) +} + +func (cgb *ClusterGroupBy) sqlScan(ctx context.Context, root *ClusterQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cgb.fns)) + for _, fn := range cgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cgb.flds)+len(cgb.fns)) + for _, f := range *cgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*cgb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ClusterSelect is the builder for selecting fields of Cluster entities. +type ClusterSelect struct { + *ClusterQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cs *ClusterSelect) Aggregate(fns ...AggregateFunc) *ClusterSelect { + cs.fns = append(cs.fns, fns...) + return cs +} + +// Scan applies the selector query and scans the result into the given value. +func (cs *ClusterSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cs.ctx, "Select") + if err := cs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ClusterQuery, *ClusterSelect](ctx, cs.ClusterQuery, cs, cs.inters, v) +} + +func (cs *ClusterSelect) sqlScan(ctx context.Context, root *ClusterQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cs.fns)) + for _, fn := range cs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := cs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// Modify adds a query modifier for attaching custom logic to queries. +func (cs *ClusterSelect) Modify(modifiers ...func(s *sql.Selector)) *ClusterSelect { + cs.modifiers = append(cs.modifiers, modifiers...) 
+ return cs +} diff --git a/packages/shared/pkg/models/cluster_update.go b/packages/shared/pkg/models/cluster_update.go new file mode 100644 index 0000000..dde9c83 --- /dev/null +++ b/packages/shared/pkg/models/cluster_update.go @@ -0,0 +1,334 @@ +// Code generated by ent, DO NOT EDIT. + +package models + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" + "github.com/e2b-dev/infra/packages/shared/pkg/models/internal" + "github.com/e2b-dev/infra/packages/shared/pkg/models/predicate" +) + +// ClusterUpdate is the builder for updating Cluster entities. +type ClusterUpdate struct { + config + hooks []Hook + mutation *ClusterMutation + modifiers []func(*sql.UpdateBuilder) +} + +// Where appends a list predicates to the ClusterUpdate builder. +func (cu *ClusterUpdate) Where(ps ...predicate.Cluster) *ClusterUpdate { + cu.mutation.Where(ps...) + return cu +} + +// SetEndpoint sets the "endpoint" field. +func (cu *ClusterUpdate) SetEndpoint(s string) *ClusterUpdate { + cu.mutation.SetEndpoint(s) + return cu +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (cu *ClusterUpdate) SetNillableEndpoint(s *string) *ClusterUpdate { + if s != nil { + cu.SetEndpoint(*s) + } + return cu +} + +// SetEndpointTLS sets the "endpoint_tls" field. +func (cu *ClusterUpdate) SetEndpointTLS(b bool) *ClusterUpdate { + cu.mutation.SetEndpointTLS(b) + return cu +} + +// SetNillableEndpointTLS sets the "endpoint_tls" field if the given value is not nil. +func (cu *ClusterUpdate) SetNillableEndpointTLS(b *bool) *ClusterUpdate { + if b != nil { + cu.SetEndpointTLS(*b) + } + return cu +} + +// SetToken sets the "token" field. +func (cu *ClusterUpdate) SetToken(s string) *ClusterUpdate { + cu.mutation.SetToken(s) + return cu +} + +// SetNillableToken sets the "token" field if the given value is not nil. 
+func (cu *ClusterUpdate) SetNillableToken(s *string) *ClusterUpdate { + if s != nil { + cu.SetToken(*s) + } + return cu +} + +// Mutation returns the ClusterMutation object of the builder. +func (cu *ClusterUpdate) Mutation() *ClusterMutation { + return cu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (cu *ClusterUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, cu.sqlSave, cu.mutation, cu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cu *ClusterUpdate) SaveX(ctx context.Context) int { + affected, err := cu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (cu *ClusterUpdate) Exec(ctx context.Context) error { + _, err := cu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (cu *ClusterUpdate) ExecX(ctx context.Context) { + if err := cu.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cu *ClusterUpdate) check() error { + if v, ok := cu.mutation.Endpoint(); ok { + if err := cluster.EndpointValidator(v); err != nil { + return &ValidationError{Name: "endpoint", err: fmt.Errorf(`models: validator failed for field "Cluster.endpoint": %w`, err)} + } + } + if v, ok := cu.mutation.Token(); ok { + if err := cluster.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`models: validator failed for field "Cluster.token": %w`, err)} + } + } + return nil +} + +// Modify adds a statement modifier for attaching custom logic to the UPDATE statement. +func (cu *ClusterUpdate) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ClusterUpdate { + cu.modifiers = append(cu.modifiers, modifiers...) 
+ return cu +} + +func (cu *ClusterUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := cu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(cluster.Table, cluster.Columns, sqlgraph.NewFieldSpec(cluster.FieldID, field.TypeUUID)) + if ps := cu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := cu.mutation.Endpoint(); ok { + _spec.SetField(cluster.FieldEndpoint, field.TypeString, value) + } + if value, ok := cu.mutation.EndpointTLS(); ok { + _spec.SetField(cluster.FieldEndpointTLS, field.TypeBool, value) + } + if value, ok := cu.mutation.Token(); ok { + _spec.SetField(cluster.FieldToken, field.TypeString, value) + } + _spec.Node.Schema = cu.schemaConfig.Cluster + ctx = internal.NewSchemaConfigContext(ctx, cu.schemaConfig) + _spec.AddModifiers(cu.modifiers...) + if n, err = sqlgraph.UpdateNodes(ctx, cu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{cluster.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + cu.mutation.done = true + return n, nil +} + +// ClusterUpdateOne is the builder for updating a single Cluster entity. +type ClusterUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ClusterMutation + modifiers []func(*sql.UpdateBuilder) +} + +// SetEndpoint sets the "endpoint" field. +func (cuo *ClusterUpdateOne) SetEndpoint(s string) *ClusterUpdateOne { + cuo.mutation.SetEndpoint(s) + return cuo +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (cuo *ClusterUpdateOne) SetNillableEndpoint(s *string) *ClusterUpdateOne { + if s != nil { + cuo.SetEndpoint(*s) + } + return cuo +} + +// SetEndpointTLS sets the "endpoint_tls" field. 
+func (cuo *ClusterUpdateOne) SetEndpointTLS(b bool) *ClusterUpdateOne { + cuo.mutation.SetEndpointTLS(b) + return cuo +} + +// SetNillableEndpointTLS sets the "endpoint_tls" field if the given value is not nil. +func (cuo *ClusterUpdateOne) SetNillableEndpointTLS(b *bool) *ClusterUpdateOne { + if b != nil { + cuo.SetEndpointTLS(*b) + } + return cuo +} + +// SetToken sets the "token" field. +func (cuo *ClusterUpdateOne) SetToken(s string) *ClusterUpdateOne { + cuo.mutation.SetToken(s) + return cuo +} + +// SetNillableToken sets the "token" field if the given value is not nil. +func (cuo *ClusterUpdateOne) SetNillableToken(s *string) *ClusterUpdateOne { + if s != nil { + cuo.SetToken(*s) + } + return cuo +} + +// Mutation returns the ClusterMutation object of the builder. +func (cuo *ClusterUpdateOne) Mutation() *ClusterMutation { + return cuo.mutation +} + +// Where appends a list predicates to the ClusterUpdate builder. +func (cuo *ClusterUpdateOne) Where(ps ...predicate.Cluster) *ClusterUpdateOne { + cuo.mutation.Where(ps...) + return cuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (cuo *ClusterUpdateOne) Select(field string, fields ...string) *ClusterUpdateOne { + cuo.fields = append([]string{field}, fields...) + return cuo +} + +// Save executes the query and returns the updated Cluster entity. +func (cuo *ClusterUpdateOne) Save(ctx context.Context) (*Cluster, error) { + return withHooks(ctx, cuo.sqlSave, cuo.mutation, cuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (cuo *ClusterUpdateOne) SaveX(ctx context.Context) *Cluster { + node, err := cuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (cuo *ClusterUpdateOne) Exec(ctx context.Context) error { + _, err := cuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (cuo *ClusterUpdateOne) ExecX(ctx context.Context) { + if err := cuo.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (cuo *ClusterUpdateOne) check() error { + if v, ok := cuo.mutation.Endpoint(); ok { + if err := cluster.EndpointValidator(v); err != nil { + return &ValidationError{Name: "endpoint", err: fmt.Errorf(`models: validator failed for field "Cluster.endpoint": %w`, err)} + } + } + if v, ok := cuo.mutation.Token(); ok { + if err := cluster.TokenValidator(v); err != nil { + return &ValidationError{Name: "token", err: fmt.Errorf(`models: validator failed for field "Cluster.token": %w`, err)} + } + } + return nil +} + +// Modify adds a statement modifier for attaching custom logic to the UPDATE statement. +func (cuo *ClusterUpdateOne) Modify(modifiers ...func(u *sql.UpdateBuilder)) *ClusterUpdateOne { + cuo.modifiers = append(cuo.modifiers, modifiers...) + return cuo +} + +func (cuo *ClusterUpdateOne) sqlSave(ctx context.Context) (_node *Cluster, err error) { + if err := cuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(cluster.Table, cluster.Columns, sqlgraph.NewFieldSpec(cluster.FieldID, field.TypeUUID)) + id, ok := cuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`models: missing "Cluster.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := cuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, cluster.FieldID) + for _, f := range fields { + if !cluster.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("models: invalid field %q for query", f)} + } + if f != cluster.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := cuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if 
value, ok := cuo.mutation.Endpoint(); ok { + _spec.SetField(cluster.FieldEndpoint, field.TypeString, value) + } + if value, ok := cuo.mutation.EndpointTLS(); ok { + _spec.SetField(cluster.FieldEndpointTLS, field.TypeBool, value) + } + if value, ok := cuo.mutation.Token(); ok { + _spec.SetField(cluster.FieldToken, field.TypeString, value) + } + _spec.Node.Schema = cuo.schemaConfig.Cluster + ctx = internal.NewSchemaConfigContext(ctx, cuo.schemaConfig) + _spec.AddModifiers(cuo.modifiers...) + _node = &Cluster{config: cuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, cuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{cluster.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + cuo.mutation.done = true + return _node, nil +} diff --git a/packages/shared/pkg/models/ent.go b/packages/shared/pkg/models/ent.go index 130a5fa..9665cf7 100644 --- a/packages/shared/pkg/models/ent.go +++ b/packages/shared/pkg/models/ent.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" "github.com/e2b-dev/infra/packages/shared/pkg/models/env" "github.com/e2b-dev/infra/packages/shared/pkg/models/envalias" "github.com/e2b-dev/infra/packages/shared/pkg/models/envbuild" @@ -83,6 +84,7 @@ func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ accesstoken.Table: accesstoken.ValidColumn, + cluster.Table: cluster.ValidColumn, env.Table: env.ValidColumn, envalias.Table: envalias.ValidColumn, envbuild.Table: envbuild.ValidColumn, diff --git a/packages/shared/pkg/models/env.go b/packages/shared/pkg/models/env.go index 5cb0b99..613aa3e 100644 --- 
a/packages/shared/pkg/models/env.go +++ b/packages/shared/pkg/models/env.go @@ -36,6 +36,8 @@ type Env struct { SpawnCount int64 `json:"spawn_count,omitempty"` // Timestamp of the last time the env was spawned LastSpawnedAt time.Time `json:"last_spawned_at,omitempty"` + // ClusterID holds the value of the "cluster_id" field. + ClusterID *uuid.UUID `json:"cluster_id,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the EnvQuery when eager-loading is set. Edges EnvEdges `json:"edges"` @@ -117,7 +119,7 @@ func (*Env) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case env.FieldCreatedBy: + case env.FieldCreatedBy, env.FieldClusterID: values[i] = &sql.NullScanner{S: new(uuid.UUID)} case env.FieldPublic: values[i] = new(sql.NullBool) @@ -199,6 +201,13 @@ func (e *Env) assignValues(columns []string, values []any) error { } else if value.Valid { e.LastSpawnedAt = value.Time } + case env.FieldClusterID: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field cluster_id", values[i]) + } else if value.Valid { + e.ClusterID = new(uuid.UUID) + *e.ClusterID = *value.S.(*uuid.UUID) + } default: e.selectValues.Set(columns[i], values[i]) } @@ -285,6 +294,11 @@ func (e *Env) String() string { builder.WriteString(", ") builder.WriteString("last_spawned_at=") builder.WriteString(e.LastSpawnedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := e.ClusterID; v != nil { + builder.WriteString("cluster_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } builder.WriteByte(')') return builder.String() } diff --git a/packages/shared/pkg/models/env/env.go b/packages/shared/pkg/models/env/env.go index ffeb720..340a162 100644 --- a/packages/shared/pkg/models/env/env.go +++ b/packages/shared/pkg/models/env/env.go @@ -30,6 +30,8 @@ const ( FieldSpawnCount = "spawn_count" // FieldLastSpawnedAt 
holds the string denoting the last_spawned_at field in the database. FieldLastSpawnedAt = "last_spawned_at" + // FieldClusterID holds the string denoting the cluster_id field in the database. + FieldClusterID = "cluster_id" // EdgeTeam holds the string denoting the team edge name in mutations. EdgeTeam = "team" // EdgeCreator holds the string denoting the creator edge name in mutations. @@ -92,6 +94,7 @@ var Columns = []string{ FieldBuildCount, FieldSpawnCount, FieldLastSpawnedAt, + FieldClusterID, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -163,6 +166,11 @@ func ByLastSpawnedAt(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldLastSpawnedAt, opts...).ToFunc() } +// ByClusterID orders the results by the cluster_id field. +func ByClusterID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClusterID, opts...).ToFunc() +} + // ByTeamField orders the results by team field. func ByTeamField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/env/where.go b/packages/shared/pkg/models/env/where.go index 64d5520..01f220f 100644 --- a/packages/shared/pkg/models/env/where.go +++ b/packages/shared/pkg/models/env/where.go @@ -107,6 +107,11 @@ func LastSpawnedAt(v time.Time) predicate.Env { return predicate.Env(sql.FieldEQ(FieldLastSpawnedAt, v)) } +// ClusterID applies equality check predicate on the "cluster_id" field. It's identical to ClusterIDEQ. +func ClusterID(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldEQ(FieldClusterID, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Env { return predicate.Env(sql.FieldEQ(FieldCreatedAt, v)) @@ -377,6 +382,56 @@ func LastSpawnedAtNotNil() predicate.Env { return predicate.Env(sql.FieldNotNull(FieldLastSpawnedAt)) } +// ClusterIDEQ applies the EQ predicate on the "cluster_id" field. 
+func ClusterIDEQ(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldEQ(FieldClusterID, v)) +} + +// ClusterIDNEQ applies the NEQ predicate on the "cluster_id" field. +func ClusterIDNEQ(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldNEQ(FieldClusterID, v)) +} + +// ClusterIDIn applies the In predicate on the "cluster_id" field. +func ClusterIDIn(vs ...uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldIn(FieldClusterID, vs...)) +} + +// ClusterIDNotIn applies the NotIn predicate on the "cluster_id" field. +func ClusterIDNotIn(vs ...uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldNotIn(FieldClusterID, vs...)) +} + +// ClusterIDGT applies the GT predicate on the "cluster_id" field. +func ClusterIDGT(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldGT(FieldClusterID, v)) +} + +// ClusterIDGTE applies the GTE predicate on the "cluster_id" field. +func ClusterIDGTE(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldGTE(FieldClusterID, v)) +} + +// ClusterIDLT applies the LT predicate on the "cluster_id" field. +func ClusterIDLT(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldLT(FieldClusterID, v)) +} + +// ClusterIDLTE applies the LTE predicate on the "cluster_id" field. +func ClusterIDLTE(v uuid.UUID) predicate.Env { + return predicate.Env(sql.FieldLTE(FieldClusterID, v)) +} + +// ClusterIDIsNil applies the IsNil predicate on the "cluster_id" field. +func ClusterIDIsNil() predicate.Env { + return predicate.Env(sql.FieldIsNull(FieldClusterID)) +} + +// ClusterIDNotNil applies the NotNil predicate on the "cluster_id" field. +func ClusterIDNotNil() predicate.Env { + return predicate.Env(sql.FieldNotNull(FieldClusterID)) +} + // HasTeam applies the HasEdge predicate on the "team" edge. 
func HasTeam() predicate.Env { return predicate.Env(func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/env_create.go b/packages/shared/pkg/models/env_create.go index cc24bcc..3b657aa 100644 --- a/packages/shared/pkg/models/env_create.go +++ b/packages/shared/pkg/models/env_create.go @@ -125,6 +125,20 @@ func (ec *EnvCreate) SetNillableLastSpawnedAt(t *time.Time) *EnvCreate { return ec } +// SetClusterID sets the "cluster_id" field. +func (ec *EnvCreate) SetClusterID(u uuid.UUID) *EnvCreate { + ec.mutation.SetClusterID(u) + return ec +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. +func (ec *EnvCreate) SetNillableClusterID(u *uuid.UUID) *EnvCreate { + if u != nil { + ec.SetClusterID(*u) + } + return ec +} + // SetID sets the "id" field. func (ec *EnvCreate) SetID(s string) *EnvCreate { ec.mutation.SetID(s) @@ -337,6 +351,10 @@ func (ec *EnvCreate) createSpec() (*Env, *sqlgraph.CreateSpec) { _spec.SetField(env.FieldLastSpawnedAt, field.TypeTime, value) _node.LastSpawnedAt = value } + if value, ok := ec.mutation.ClusterID(); ok { + _spec.SetField(env.FieldClusterID, field.TypeUUID, value) + _node.ClusterID = &value + } if nodes := ec.mutation.TeamIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -584,6 +602,24 @@ func (u *EnvUpsert) ClearLastSpawnedAt() *EnvUpsert { return u } +// SetClusterID sets the "cluster_id" field. +func (u *EnvUpsert) SetClusterID(v uuid.UUID) *EnvUpsert { + u.Set(env.FieldClusterID, v) + return u +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. +func (u *EnvUpsert) UpdateClusterID() *EnvUpsert { + u.SetExcluded(env.FieldClusterID) + return u +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *EnvUpsert) ClearClusterID() *EnvUpsert { + u.SetNull(env.FieldClusterID) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. 
// Using this option is equivalent to using: // @@ -761,6 +797,27 @@ func (u *EnvUpsertOne) ClearLastSpawnedAt() *EnvUpsertOne { }) } +// SetClusterID sets the "cluster_id" field. +func (u *EnvUpsertOne) SetClusterID(v uuid.UUID) *EnvUpsertOne { + return u.Update(func(s *EnvUpsert) { + s.SetClusterID(v) + }) +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. +func (u *EnvUpsertOne) UpdateClusterID() *EnvUpsertOne { + return u.Update(func(s *EnvUpsert) { + s.UpdateClusterID() + }) +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *EnvUpsertOne) ClearClusterID() *EnvUpsertOne { + return u.Update(func(s *EnvUpsert) { + s.ClearClusterID() + }) +} + // Exec executes the query. func (u *EnvUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -1105,6 +1162,27 @@ func (u *EnvUpsertBulk) ClearLastSpawnedAt() *EnvUpsertBulk { }) } +// SetClusterID sets the "cluster_id" field. +func (u *EnvUpsertBulk) SetClusterID(v uuid.UUID) *EnvUpsertBulk { + return u.Update(func(s *EnvUpsert) { + s.SetClusterID(v) + }) +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. +func (u *EnvUpsertBulk) UpdateClusterID() *EnvUpsertBulk { + return u.Update(func(s *EnvUpsert) { + s.UpdateClusterID() + }) +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *EnvUpsertBulk) ClearClusterID() *EnvUpsertBulk { + return u.Update(func(s *EnvUpsert) { + s.ClearClusterID() + }) +} + // Exec executes the query. func (u *EnvUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/packages/shared/pkg/models/env_update.go b/packages/shared/pkg/models/env_update.go index 91124d9..3a05a2a 100644 --- a/packages/shared/pkg/models/env_update.go +++ b/packages/shared/pkg/models/env_update.go @@ -160,6 +160,26 @@ func (eu *EnvUpdate) ClearLastSpawnedAt() *EnvUpdate { return eu } +// SetClusterID sets the "cluster_id" field. 
+func (eu *EnvUpdate) SetClusterID(u uuid.UUID) *EnvUpdate { + eu.mutation.SetClusterID(u) + return eu +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. +func (eu *EnvUpdate) SetNillableClusterID(u *uuid.UUID) *EnvUpdate { + if u != nil { + eu.SetClusterID(*u) + } + return eu +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (eu *EnvUpdate) ClearClusterID() *EnvUpdate { + eu.mutation.ClearClusterID() + return eu +} + // SetTeam sets the "team" edge to the Team entity. func (eu *EnvUpdate) SetTeam(t *Team) *EnvUpdate { return eu.SetTeamID(t.ID) @@ -386,6 +406,12 @@ func (eu *EnvUpdate) sqlSave(ctx context.Context) (n int, err error) { if eu.mutation.LastSpawnedAtCleared() { _spec.ClearField(env.FieldLastSpawnedAt, field.TypeTime) } + if value, ok := eu.mutation.ClusterID(); ok { + _spec.SetField(env.FieldClusterID, field.TypeUUID, value) + } + if eu.mutation.ClusterIDCleared() { + _spec.ClearField(env.FieldClusterID, field.TypeUUID) + } if eu.mutation.TeamCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -740,6 +766,26 @@ func (euo *EnvUpdateOne) ClearLastSpawnedAt() *EnvUpdateOne { return euo } +// SetClusterID sets the "cluster_id" field. +func (euo *EnvUpdateOne) SetClusterID(u uuid.UUID) *EnvUpdateOne { + euo.mutation.SetClusterID(u) + return euo +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. +func (euo *EnvUpdateOne) SetNillableClusterID(u *uuid.UUID) *EnvUpdateOne { + if u != nil { + euo.SetClusterID(*u) + } + return euo +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (euo *EnvUpdateOne) ClearClusterID() *EnvUpdateOne { + euo.mutation.ClearClusterID() + return euo +} + // SetTeam sets the "team" edge to the Team entity. 
func (euo *EnvUpdateOne) SetTeam(t *Team) *EnvUpdateOne { return euo.SetTeamID(t.ID) @@ -996,6 +1042,12 @@ func (euo *EnvUpdateOne) sqlSave(ctx context.Context) (_node *Env, err error) { if euo.mutation.LastSpawnedAtCleared() { _spec.ClearField(env.FieldLastSpawnedAt, field.TypeTime) } + if value, ok := euo.mutation.ClusterID(); ok { + _spec.SetField(env.FieldClusterID, field.TypeUUID, value) + } + if euo.mutation.ClusterIDCleared() { + _spec.ClearField(env.FieldClusterID, field.TypeUUID) + } if euo.mutation.TeamCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/packages/shared/pkg/models/envbuild.go b/packages/shared/pkg/models/envbuild.go index e364336..381d9e5 100644 --- a/packages/shared/pkg/models/envbuild.go +++ b/packages/shared/pkg/models/envbuild.go @@ -33,6 +33,8 @@ type EnvBuild struct { Dockerfile *string `json:"dockerfile,omitempty"` // StartCmd holds the value of the "start_cmd" field. StartCmd *string `json:"start_cmd,omitempty"` + // ReadyCmd holds the value of the "ready_cmd" field. + ReadyCmd *string `json:"ready_cmd,omitempty"` // Vcpu holds the value of the "vcpu" field. Vcpu int64 `json:"vcpu,omitempty"` // RAMMB holds the value of the "ram_mb" field. @@ -47,6 +49,8 @@ type EnvBuild struct { FirecrackerVersion string `json:"firecracker_version,omitempty"` // EnvdVersion holds the value of the "envd_version" field. EnvdVersion *string `json:"envd_version,omitempty"` + // ClusterNodeID holds the value of the "cluster_node_id" field. + ClusterNodeID *string `json:"cluster_node_id,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the EnvBuildQuery when eager-loading is set. 
Edges EnvBuildEdges `json:"edges"` @@ -82,7 +86,7 @@ func (*EnvBuild) scanValues(columns []string) ([]any, error) { switch columns[i] { case envbuild.FieldVcpu, envbuild.FieldRAMMB, envbuild.FieldFreeDiskSizeMB, envbuild.FieldTotalDiskSizeMB: values[i] = new(sql.NullInt64) - case envbuild.FieldEnvID, envbuild.FieldStatus, envbuild.FieldDockerfile, envbuild.FieldStartCmd, envbuild.FieldKernelVersion, envbuild.FieldFirecrackerVersion, envbuild.FieldEnvdVersion: + case envbuild.FieldEnvID, envbuild.FieldStatus, envbuild.FieldDockerfile, envbuild.FieldStartCmd, envbuild.FieldReadyCmd, envbuild.FieldKernelVersion, envbuild.FieldFirecrackerVersion, envbuild.FieldEnvdVersion, envbuild.FieldClusterNodeID: values[i] = new(sql.NullString) case envbuild.FieldCreatedAt, envbuild.FieldUpdatedAt, envbuild.FieldFinishedAt: values[i] = new(sql.NullTime) @@ -155,6 +159,13 @@ func (eb *EnvBuild) assignValues(columns []string, values []any) error { eb.StartCmd = new(string) *eb.StartCmd = value.String } + case envbuild.FieldReadyCmd: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ready_cmd", values[i]) + } else if value.Valid { + eb.ReadyCmd = new(string) + *eb.ReadyCmd = value.String + } case envbuild.FieldVcpu: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field vcpu", values[i]) @@ -199,6 +210,13 @@ func (eb *EnvBuild) assignValues(columns []string, values []any) error { eb.EnvdVersion = new(string) *eb.EnvdVersion = value.String } + case envbuild.FieldClusterNodeID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field cluster_node_id", values[i]) + } else if value.Valid { + eb.ClusterNodeID = new(string) + *eb.ClusterNodeID = value.String + } default: eb.selectValues.Set(columns[i], values[i]) } @@ -269,6 +287,11 @@ func (eb *EnvBuild) String() string { builder.WriteString(*v) } builder.WriteString(", ") + if v := eb.ReadyCmd; v 
!= nil { + builder.WriteString("ready_cmd=") + builder.WriteString(*v) + } + builder.WriteString(", ") builder.WriteString("vcpu=") builder.WriteString(fmt.Sprintf("%v", eb.Vcpu)) builder.WriteString(", ") @@ -293,6 +316,11 @@ func (eb *EnvBuild) String() string { builder.WriteString("envd_version=") builder.WriteString(*v) } + builder.WriteString(", ") + if v := eb.ClusterNodeID; v != nil { + builder.WriteString("cluster_node_id=") + builder.WriteString(*v) + } builder.WriteByte(')') return builder.String() } diff --git a/packages/shared/pkg/models/envbuild/envbuild.go b/packages/shared/pkg/models/envbuild/envbuild.go index 1501d9e..e0fc3ca 100644 --- a/packages/shared/pkg/models/envbuild/envbuild.go +++ b/packages/shared/pkg/models/envbuild/envbuild.go @@ -29,6 +29,8 @@ const ( FieldDockerfile = "dockerfile" // FieldStartCmd holds the string denoting the start_cmd field in the database. FieldStartCmd = "start_cmd" + // FieldReadyCmd holds the string denoting the ready_cmd field in the database. + FieldReadyCmd = "ready_cmd" // FieldVcpu holds the string denoting the vcpu field in the database. FieldVcpu = "vcpu" // FieldRAMMB holds the string denoting the ram_mb field in the database. @@ -43,6 +45,8 @@ const ( FieldFirecrackerVersion = "firecracker_version" // FieldEnvdVersion holds the string denoting the envd_version field in the database. FieldEnvdVersion = "envd_version" + // FieldClusterNodeID holds the string denoting the cluster_node_id field in the database. + FieldClusterNodeID = "cluster_node_id" // EdgeEnv holds the string denoting the env edge name in mutations. EdgeEnv = "env" // Table holds the table name of the envbuild in the database. 
@@ -66,6 +70,7 @@ var Columns = []string{ FieldStatus, FieldDockerfile, FieldStartCmd, + FieldReadyCmd, FieldVcpu, FieldRAMMB, FieldFreeDiskSizeMB, @@ -73,6 +78,7 @@ var Columns = []string{ FieldKernelVersion, FieldFirecrackerVersion, FieldEnvdVersion, + FieldClusterNodeID, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -104,11 +110,12 @@ const DefaultStatus = StatusWaiting // Status values. const ( - StatusWaiting Status = "waiting" - StatusBuilding Status = "building" - StatusFailed Status = "failed" - StatusSuccess Status = "success" - StatusUploaded Status = "uploaded" + StatusWaiting Status = "waiting" + StatusBuilding Status = "building" + StatusSnapshotting Status = "snapshotting" + StatusFailed Status = "failed" + StatusSuccess Status = "success" + StatusUploaded Status = "uploaded" ) func (s Status) String() string { @@ -118,7 +125,7 @@ func (s Status) String() string { // StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. func StatusValidator(s Status) error { switch s { - case StatusWaiting, StatusBuilding, StatusFailed, StatusSuccess, StatusUploaded: + case StatusWaiting, StatusBuilding, StatusSnapshotting, StatusFailed, StatusSuccess, StatusUploaded: return nil default: return fmt.Errorf("envbuild: invalid enum value for status field: %q", s) @@ -168,6 +175,11 @@ func ByStartCmd(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldStartCmd, opts...).ToFunc() } +// ByReadyCmd orders the results by the ready_cmd field. +func ByReadyCmd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReadyCmd, opts...).ToFunc() +} + // ByVcpu orders the results by the vcpu field. 
func ByVcpu(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldVcpu, opts...).ToFunc() @@ -203,6 +215,11 @@ func ByEnvdVersion(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldEnvdVersion, opts...).ToFunc() } +// ByClusterNodeID orders the results by the cluster_node_id field. +func ByClusterNodeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClusterNodeID, opts...).ToFunc() +} + // ByEnvField orders the results by env field. func ByEnvField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/envbuild/where.go b/packages/shared/pkg/models/envbuild/where.go index a6cb25f..ff69c96 100644 --- a/packages/shared/pkg/models/envbuild/where.go +++ b/packages/shared/pkg/models/envbuild/where.go @@ -87,6 +87,11 @@ func StartCmd(v string) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldEQ(FieldStartCmd, v)) } +// ReadyCmd applies equality check predicate on the "ready_cmd" field. It's identical to ReadyCmdEQ. +func ReadyCmd(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEQ(FieldReadyCmd, v)) +} + // Vcpu applies equality check predicate on the "vcpu" field. It's identical to VcpuEQ. func Vcpu(v int64) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldEQ(FieldVcpu, v)) @@ -122,6 +127,11 @@ func EnvdVersion(v string) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldEQ(FieldEnvdVersion, v)) } +// ClusterNodeID applies equality check predicate on the "cluster_node_id" field. It's identical to ClusterNodeIDEQ. +func ClusterNodeID(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEQ(FieldClusterNodeID, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. 
func CreatedAtEQ(v time.Time) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldEQ(FieldCreatedAt, v)) @@ -497,6 +507,81 @@ func StartCmdContainsFold(v string) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldContainsFold(FieldStartCmd, v)) } +// ReadyCmdEQ applies the EQ predicate on the "ready_cmd" field. +func ReadyCmdEQ(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEQ(FieldReadyCmd, v)) +} + +// ReadyCmdNEQ applies the NEQ predicate on the "ready_cmd" field. +func ReadyCmdNEQ(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNEQ(FieldReadyCmd, v)) +} + +// ReadyCmdIn applies the In predicate on the "ready_cmd" field. +func ReadyCmdIn(vs ...string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldIn(FieldReadyCmd, vs...)) +} + +// ReadyCmdNotIn applies the NotIn predicate on the "ready_cmd" field. +func ReadyCmdNotIn(vs ...string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNotIn(FieldReadyCmd, vs...)) +} + +// ReadyCmdGT applies the GT predicate on the "ready_cmd" field. +func ReadyCmdGT(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldGT(FieldReadyCmd, v)) +} + +// ReadyCmdGTE applies the GTE predicate on the "ready_cmd" field. +func ReadyCmdGTE(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldGTE(FieldReadyCmd, v)) +} + +// ReadyCmdLT applies the LT predicate on the "ready_cmd" field. +func ReadyCmdLT(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldLT(FieldReadyCmd, v)) +} + +// ReadyCmdLTE applies the LTE predicate on the "ready_cmd" field. +func ReadyCmdLTE(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldLTE(FieldReadyCmd, v)) +} + +// ReadyCmdContains applies the Contains predicate on the "ready_cmd" field. 
+func ReadyCmdContains(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldContains(FieldReadyCmd, v)) +} + +// ReadyCmdHasPrefix applies the HasPrefix predicate on the "ready_cmd" field. +func ReadyCmdHasPrefix(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldHasPrefix(FieldReadyCmd, v)) +} + +// ReadyCmdHasSuffix applies the HasSuffix predicate on the "ready_cmd" field. +func ReadyCmdHasSuffix(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldHasSuffix(FieldReadyCmd, v)) +} + +// ReadyCmdIsNil applies the IsNil predicate on the "ready_cmd" field. +func ReadyCmdIsNil() predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldIsNull(FieldReadyCmd)) +} + +// ReadyCmdNotNil applies the NotNil predicate on the "ready_cmd" field. +func ReadyCmdNotNil() predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNotNull(FieldReadyCmd)) +} + +// ReadyCmdEqualFold applies the EqualFold predicate on the "ready_cmd" field. +func ReadyCmdEqualFold(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEqualFold(FieldReadyCmd, v)) +} + +// ReadyCmdContainsFold applies the ContainsFold predicate on the "ready_cmd" field. +func ReadyCmdContainsFold(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldContainsFold(FieldReadyCmd, v)) +} + // VcpuEQ applies the EQ predicate on the "vcpu" field. func VcpuEQ(v int64) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldEQ(FieldVcpu, v)) @@ -872,6 +957,81 @@ func EnvdVersionContainsFold(v string) predicate.EnvBuild { return predicate.EnvBuild(sql.FieldContainsFold(FieldEnvdVersion, v)) } +// ClusterNodeIDEQ applies the EQ predicate on the "cluster_node_id" field. +func ClusterNodeIDEQ(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEQ(FieldClusterNodeID, v)) +} + +// ClusterNodeIDNEQ applies the NEQ predicate on the "cluster_node_id" field. 
+func ClusterNodeIDNEQ(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNEQ(FieldClusterNodeID, v)) +} + +// ClusterNodeIDIn applies the In predicate on the "cluster_node_id" field. +func ClusterNodeIDIn(vs ...string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldIn(FieldClusterNodeID, vs...)) +} + +// ClusterNodeIDNotIn applies the NotIn predicate on the "cluster_node_id" field. +func ClusterNodeIDNotIn(vs ...string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNotIn(FieldClusterNodeID, vs...)) +} + +// ClusterNodeIDGT applies the GT predicate on the "cluster_node_id" field. +func ClusterNodeIDGT(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldGT(FieldClusterNodeID, v)) +} + +// ClusterNodeIDGTE applies the GTE predicate on the "cluster_node_id" field. +func ClusterNodeIDGTE(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldGTE(FieldClusterNodeID, v)) +} + +// ClusterNodeIDLT applies the LT predicate on the "cluster_node_id" field. +func ClusterNodeIDLT(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldLT(FieldClusterNodeID, v)) +} + +// ClusterNodeIDLTE applies the LTE predicate on the "cluster_node_id" field. +func ClusterNodeIDLTE(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldLTE(FieldClusterNodeID, v)) +} + +// ClusterNodeIDContains applies the Contains predicate on the "cluster_node_id" field. +func ClusterNodeIDContains(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldContains(FieldClusterNodeID, v)) +} + +// ClusterNodeIDHasPrefix applies the HasPrefix predicate on the "cluster_node_id" field. +func ClusterNodeIDHasPrefix(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldHasPrefix(FieldClusterNodeID, v)) +} + +// ClusterNodeIDHasSuffix applies the HasSuffix predicate on the "cluster_node_id" field. 
+func ClusterNodeIDHasSuffix(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldHasSuffix(FieldClusterNodeID, v)) +} + +// ClusterNodeIDIsNil applies the IsNil predicate on the "cluster_node_id" field. +func ClusterNodeIDIsNil() predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldIsNull(FieldClusterNodeID)) +} + +// ClusterNodeIDNotNil applies the NotNil predicate on the "cluster_node_id" field. +func ClusterNodeIDNotNil() predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldNotNull(FieldClusterNodeID)) +} + +// ClusterNodeIDEqualFold applies the EqualFold predicate on the "cluster_node_id" field. +func ClusterNodeIDEqualFold(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldEqualFold(FieldClusterNodeID, v)) +} + +// ClusterNodeIDContainsFold applies the ContainsFold predicate on the "cluster_node_id" field. +func ClusterNodeIDContainsFold(v string) predicate.EnvBuild { + return predicate.EnvBuild(sql.FieldContainsFold(FieldClusterNodeID, v)) +} + // HasEnv applies the HasEdge predicate on the "env" edge. func HasEnv() predicate.EnvBuild { return predicate.EnvBuild(func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/envbuild_create.go b/packages/shared/pkg/models/envbuild_create.go index cea1edd..911fb1e 100644 --- a/packages/shared/pkg/models/envbuild_create.go +++ b/packages/shared/pkg/models/envbuild_create.go @@ -123,6 +123,20 @@ func (ebc *EnvBuildCreate) SetNillableStartCmd(s *string) *EnvBuildCreate { return ebc } +// SetReadyCmd sets the "ready_cmd" field. +func (ebc *EnvBuildCreate) SetReadyCmd(s string) *EnvBuildCreate { + ebc.mutation.SetReadyCmd(s) + return ebc +} + +// SetNillableReadyCmd sets the "ready_cmd" field if the given value is not nil. +func (ebc *EnvBuildCreate) SetNillableReadyCmd(s *string) *EnvBuildCreate { + if s != nil { + ebc.SetReadyCmd(*s) + } + return ebc +} + // SetVcpu sets the "vcpu" field. 
func (ebc *EnvBuildCreate) SetVcpu(i int64) *EnvBuildCreate { ebc.mutation.SetVcpu(i) @@ -197,6 +211,20 @@ func (ebc *EnvBuildCreate) SetNillableEnvdVersion(s *string) *EnvBuildCreate { return ebc } +// SetClusterNodeID sets the "cluster_node_id" field. +func (ebc *EnvBuildCreate) SetClusterNodeID(s string) *EnvBuildCreate { + ebc.mutation.SetClusterNodeID(s) + return ebc +} + +// SetNillableClusterNodeID sets the "cluster_node_id" field if the given value is not nil. +func (ebc *EnvBuildCreate) SetNillableClusterNodeID(s *string) *EnvBuildCreate { + if s != nil { + ebc.SetClusterNodeID(*s) + } + return ebc +} + // SetID sets the "id" field. func (ebc *EnvBuildCreate) SetID(u uuid.UUID) *EnvBuildCreate { ebc.mutation.SetID(u) @@ -357,6 +385,10 @@ func (ebc *EnvBuildCreate) createSpec() (*EnvBuild, *sqlgraph.CreateSpec) { _spec.SetField(envbuild.FieldStartCmd, field.TypeString, value) _node.StartCmd = &value } + if value, ok := ebc.mutation.ReadyCmd(); ok { + _spec.SetField(envbuild.FieldReadyCmd, field.TypeString, value) + _node.ReadyCmd = &value + } if value, ok := ebc.mutation.Vcpu(); ok { _spec.SetField(envbuild.FieldVcpu, field.TypeInt64, value) _node.Vcpu = value @@ -385,6 +417,10 @@ func (ebc *EnvBuildCreate) createSpec() (*EnvBuild, *sqlgraph.CreateSpec) { _spec.SetField(envbuild.FieldEnvdVersion, field.TypeString, value) _node.EnvdVersion = &value } + if value, ok := ebc.mutation.ClusterNodeID(); ok { + _spec.SetField(envbuild.FieldClusterNodeID, field.TypeString, value) + _node.ClusterNodeID = &value + } if nodes := ebc.mutation.EnvIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -551,6 +587,24 @@ func (u *EnvBuildUpsert) ClearStartCmd() *EnvBuildUpsert { return u } +// SetReadyCmd sets the "ready_cmd" field. +func (u *EnvBuildUpsert) SetReadyCmd(v string) *EnvBuildUpsert { + u.Set(envbuild.FieldReadyCmd, v) + return u +} + +// UpdateReadyCmd sets the "ready_cmd" field to the value that was provided on create. 
+func (u *EnvBuildUpsert) UpdateReadyCmd() *EnvBuildUpsert { + u.SetExcluded(envbuild.FieldReadyCmd) + return u +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. +func (u *EnvBuildUpsert) ClearReadyCmd() *EnvBuildUpsert { + u.SetNull(envbuild.FieldReadyCmd) + return u +} + // SetVcpu sets the "vcpu" field. func (u *EnvBuildUpsert) SetVcpu(v int64) *EnvBuildUpsert { u.Set(envbuild.FieldVcpu, v) @@ -671,6 +725,24 @@ func (u *EnvBuildUpsert) ClearEnvdVersion() *EnvBuildUpsert { return u } +// SetClusterNodeID sets the "cluster_node_id" field. +func (u *EnvBuildUpsert) SetClusterNodeID(v string) *EnvBuildUpsert { + u.Set(envbuild.FieldClusterNodeID, v) + return u +} + +// UpdateClusterNodeID sets the "cluster_node_id" field to the value that was provided on create. +func (u *EnvBuildUpsert) UpdateClusterNodeID() *EnvBuildUpsert { + u.SetExcluded(envbuild.FieldClusterNodeID) + return u +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (u *EnvBuildUpsert) ClearClusterNodeID() *EnvBuildUpsert { + u.SetNull(envbuild.FieldClusterNodeID) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. // Using this option is equivalent to using: // @@ -834,6 +906,27 @@ func (u *EnvBuildUpsertOne) ClearStartCmd() *EnvBuildUpsertOne { }) } +// SetReadyCmd sets the "ready_cmd" field. +func (u *EnvBuildUpsertOne) SetReadyCmd(v string) *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.SetReadyCmd(v) + }) +} + +// UpdateReadyCmd sets the "ready_cmd" field to the value that was provided on create. +func (u *EnvBuildUpsertOne) UpdateReadyCmd() *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.UpdateReadyCmd() + }) +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. 
+func (u *EnvBuildUpsertOne) ClearReadyCmd() *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.ClearReadyCmd() + }) +} + // SetVcpu sets the "vcpu" field. func (u *EnvBuildUpsertOne) SetVcpu(v int64) *EnvBuildUpsertOne { return u.Update(func(s *EnvBuildUpsert) { @@ -974,6 +1067,27 @@ func (u *EnvBuildUpsertOne) ClearEnvdVersion() *EnvBuildUpsertOne { }) } +// SetClusterNodeID sets the "cluster_node_id" field. +func (u *EnvBuildUpsertOne) SetClusterNodeID(v string) *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.SetClusterNodeID(v) + }) +} + +// UpdateClusterNodeID sets the "cluster_node_id" field to the value that was provided on create. +func (u *EnvBuildUpsertOne) UpdateClusterNodeID() *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.UpdateClusterNodeID() + }) +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (u *EnvBuildUpsertOne) ClearClusterNodeID() *EnvBuildUpsertOne { + return u.Update(func(s *EnvBuildUpsert) { + s.ClearClusterNodeID() + }) +} + // Exec executes the query. func (u *EnvBuildUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -1304,6 +1418,27 @@ func (u *EnvBuildUpsertBulk) ClearStartCmd() *EnvBuildUpsertBulk { }) } +// SetReadyCmd sets the "ready_cmd" field. +func (u *EnvBuildUpsertBulk) SetReadyCmd(v string) *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.SetReadyCmd(v) + }) +} + +// UpdateReadyCmd sets the "ready_cmd" field to the value that was provided on create. +func (u *EnvBuildUpsertBulk) UpdateReadyCmd() *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.UpdateReadyCmd() + }) +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. +func (u *EnvBuildUpsertBulk) ClearReadyCmd() *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.ClearReadyCmd() + }) +} + // SetVcpu sets the "vcpu" field. 
func (u *EnvBuildUpsertBulk) SetVcpu(v int64) *EnvBuildUpsertBulk { return u.Update(func(s *EnvBuildUpsert) { @@ -1444,6 +1579,27 @@ func (u *EnvBuildUpsertBulk) ClearEnvdVersion() *EnvBuildUpsertBulk { }) } +// SetClusterNodeID sets the "cluster_node_id" field. +func (u *EnvBuildUpsertBulk) SetClusterNodeID(v string) *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.SetClusterNodeID(v) + }) +} + +// UpdateClusterNodeID sets the "cluster_node_id" field to the value that was provided on create. +func (u *EnvBuildUpsertBulk) UpdateClusterNodeID() *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.UpdateClusterNodeID() + }) +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (u *EnvBuildUpsertBulk) ClearClusterNodeID() *EnvBuildUpsertBulk { + return u.Update(func(s *EnvBuildUpsert) { + s.ClearClusterNodeID() + }) +} + // Exec executes the query. func (u *EnvBuildUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/packages/shared/pkg/models/envbuild_update.go b/packages/shared/pkg/models/envbuild_update.go index 639cf44..9f74fed 100644 --- a/packages/shared/pkg/models/envbuild_update.go +++ b/packages/shared/pkg/models/envbuild_update.go @@ -139,6 +139,26 @@ func (ebu *EnvBuildUpdate) ClearStartCmd() *EnvBuildUpdate { return ebu } +// SetReadyCmd sets the "ready_cmd" field. +func (ebu *EnvBuildUpdate) SetReadyCmd(s string) *EnvBuildUpdate { + ebu.mutation.SetReadyCmd(s) + return ebu +} + +// SetNillableReadyCmd sets the "ready_cmd" field if the given value is not nil. +func (ebu *EnvBuildUpdate) SetNillableReadyCmd(s *string) *EnvBuildUpdate { + if s != nil { + ebu.SetReadyCmd(*s) + } + return ebu +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. +func (ebu *EnvBuildUpdate) ClearReadyCmd() *EnvBuildUpdate { + ebu.mutation.ClearReadyCmd() + return ebu +} + // SetVcpu sets the "vcpu" field. 
func (ebu *EnvBuildUpdate) SetVcpu(i int64) *EnvBuildUpdate { ebu.mutation.ResetVcpu() @@ -277,6 +297,26 @@ func (ebu *EnvBuildUpdate) ClearEnvdVersion() *EnvBuildUpdate { return ebu } +// SetClusterNodeID sets the "cluster_node_id" field. +func (ebu *EnvBuildUpdate) SetClusterNodeID(s string) *EnvBuildUpdate { + ebu.mutation.SetClusterNodeID(s) + return ebu +} + +// SetNillableClusterNodeID sets the "cluster_node_id" field if the given value is not nil. +func (ebu *EnvBuildUpdate) SetNillableClusterNodeID(s *string) *EnvBuildUpdate { + if s != nil { + ebu.SetClusterNodeID(*s) + } + return ebu +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (ebu *EnvBuildUpdate) ClearClusterNodeID() *EnvBuildUpdate { + ebu.mutation.ClearClusterNodeID() + return ebu +} + // SetEnv sets the "env" edge to the Env entity. func (ebu *EnvBuildUpdate) SetEnv(e *Env) *EnvBuildUpdate { return ebu.SetEnvID(e.ID) @@ -372,6 +412,12 @@ func (ebu *EnvBuildUpdate) sqlSave(ctx context.Context) (n int, err error) { if ebu.mutation.StartCmdCleared() { _spec.ClearField(envbuild.FieldStartCmd, field.TypeString) } + if value, ok := ebu.mutation.ReadyCmd(); ok { + _spec.SetField(envbuild.FieldReadyCmd, field.TypeString, value) + } + if ebu.mutation.ReadyCmdCleared() { + _spec.ClearField(envbuild.FieldReadyCmd, field.TypeString) + } if value, ok := ebu.mutation.Vcpu(); ok { _spec.SetField(envbuild.FieldVcpu, field.TypeInt64, value) } @@ -411,6 +457,12 @@ func (ebu *EnvBuildUpdate) sqlSave(ctx context.Context) (n int, err error) { if ebu.mutation.EnvdVersionCleared() { _spec.ClearField(envbuild.FieldEnvdVersion, field.TypeString) } + if value, ok := ebu.mutation.ClusterNodeID(); ok { + _spec.SetField(envbuild.FieldClusterNodeID, field.TypeString, value) + } + if ebu.mutation.ClusterNodeIDCleared() { + _spec.ClearField(envbuild.FieldClusterNodeID, field.TypeString) + } if ebu.mutation.EnvCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -574,6 +626,26 @@ func 
(ebuo *EnvBuildUpdateOne) ClearStartCmd() *EnvBuildUpdateOne { return ebuo } +// SetReadyCmd sets the "ready_cmd" field. +func (ebuo *EnvBuildUpdateOne) SetReadyCmd(s string) *EnvBuildUpdateOne { + ebuo.mutation.SetReadyCmd(s) + return ebuo +} + +// SetNillableReadyCmd sets the "ready_cmd" field if the given value is not nil. +func (ebuo *EnvBuildUpdateOne) SetNillableReadyCmd(s *string) *EnvBuildUpdateOne { + if s != nil { + ebuo.SetReadyCmd(*s) + } + return ebuo +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. +func (ebuo *EnvBuildUpdateOne) ClearReadyCmd() *EnvBuildUpdateOne { + ebuo.mutation.ClearReadyCmd() + return ebuo +} + // SetVcpu sets the "vcpu" field. func (ebuo *EnvBuildUpdateOne) SetVcpu(i int64) *EnvBuildUpdateOne { ebuo.mutation.ResetVcpu() @@ -712,6 +784,26 @@ func (ebuo *EnvBuildUpdateOne) ClearEnvdVersion() *EnvBuildUpdateOne { return ebuo } +// SetClusterNodeID sets the "cluster_node_id" field. +func (ebuo *EnvBuildUpdateOne) SetClusterNodeID(s string) *EnvBuildUpdateOne { + ebuo.mutation.SetClusterNodeID(s) + return ebuo +} + +// SetNillableClusterNodeID sets the "cluster_node_id" field if the given value is not nil. +func (ebuo *EnvBuildUpdateOne) SetNillableClusterNodeID(s *string) *EnvBuildUpdateOne { + if s != nil { + ebuo.SetClusterNodeID(*s) + } + return ebuo +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (ebuo *EnvBuildUpdateOne) ClearClusterNodeID() *EnvBuildUpdateOne { + ebuo.mutation.ClearClusterNodeID() + return ebuo +} + // SetEnv sets the "env" edge to the Env entity. 
func (ebuo *EnvBuildUpdateOne) SetEnv(e *Env) *EnvBuildUpdateOne { return ebuo.SetEnvID(e.ID) @@ -837,6 +929,12 @@ func (ebuo *EnvBuildUpdateOne) sqlSave(ctx context.Context) (_node *EnvBuild, er if ebuo.mutation.StartCmdCleared() { _spec.ClearField(envbuild.FieldStartCmd, field.TypeString) } + if value, ok := ebuo.mutation.ReadyCmd(); ok { + _spec.SetField(envbuild.FieldReadyCmd, field.TypeString, value) + } + if ebuo.mutation.ReadyCmdCleared() { + _spec.ClearField(envbuild.FieldReadyCmd, field.TypeString) + } if value, ok := ebuo.mutation.Vcpu(); ok { _spec.SetField(envbuild.FieldVcpu, field.TypeInt64, value) } @@ -876,6 +974,12 @@ func (ebuo *EnvBuildUpdateOne) sqlSave(ctx context.Context) (_node *EnvBuild, er if ebuo.mutation.EnvdVersionCleared() { _spec.ClearField(envbuild.FieldEnvdVersion, field.TypeString) } + if value, ok := ebuo.mutation.ClusterNodeID(); ok { + _spec.SetField(envbuild.FieldClusterNodeID, field.TypeString, value) + } + if ebuo.mutation.ClusterNodeIDCleared() { + _spec.ClearField(envbuild.FieldClusterNodeID, field.TypeString) + } if ebuo.mutation.EnvCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/packages/shared/pkg/models/hook/hook.go b/packages/shared/pkg/models/hook/hook.go index 2923349..456a7b9 100644 --- a/packages/shared/pkg/models/hook/hook.go +++ b/packages/shared/pkg/models/hook/hook.go @@ -21,6 +21,18 @@ func (f AccessTokenFunc) Mutate(ctx context.Context, m models.Mutation) (models. return nil, fmt.Errorf("unexpected mutation type %T. expect *models.AccessTokenMutation", m) } +// The ClusterFunc type is an adapter to allow the use of ordinary +// function as Cluster mutator. +type ClusterFunc func(context.Context, *models.ClusterMutation) (models.Value, error) + +// Mutate calls f(ctx, m). +func (f ClusterFunc) Mutate(ctx context.Context, m models.Mutation) (models.Value, error) { + if mv, ok := m.(*models.ClusterMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *models.ClusterMutation", m) +} + // The EnvFunc type is an adapter to allow the use of ordinary // function as Env mutator. type EnvFunc func(context.Context, *models.EnvMutation) (models.Value, error) diff --git a/packages/shared/pkg/models/internal/schemaconfig.go b/packages/shared/pkg/models/internal/schemaconfig.go index 61fe3a8..ae148a5 100644 --- a/packages/shared/pkg/models/internal/schemaconfig.go +++ b/packages/shared/pkg/models/internal/schemaconfig.go @@ -8,6 +8,7 @@ import "context" // that can be passed at runtime. type SchemaConfig struct { AccessToken string // AccessToken table. + Cluster string // Cluster table. Env string // Env table. EnvAlias string // EnvAlias table. EnvBuild string // EnvBuild table. diff --git a/packages/shared/pkg/models/migrate/schema.go b/packages/shared/pkg/models/migrate/schema.go index 7035379..19cb08a 100644 --- a/packages/shared/pkg/models/migrate/schema.go +++ b/packages/shared/pkg/models/migrate/schema.go @@ -11,7 +11,14 @@ import ( var ( // AccessTokensColumns holds the columns for the "access_tokens" table. 
AccessTokensColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID, Unique: true, Default: "gen_random_uuid()"}, {Name: "access_token", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "access_token_hash", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "access_token_prefix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(10)"}}, + {Name: "access_token_length", Type: field.TypeInt}, + {Name: "access_token_mask_prefix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(5)"}}, + {Name: "access_token_mask_suffix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(5)"}}, + {Name: "name", Type: field.TypeString, Default: "Unnamed Access Token", SchemaType: map[string]string{"postgres": "text"}}, {Name: "created_at", Type: field.TypeTime, Nullable: true, Default: "CURRENT_TIMESTAMP"}, {Name: "user_id", Type: field.TypeUUID}, } @@ -23,12 +30,25 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "access_tokens_users_access_tokens", - Columns: []*schema.Column{AccessTokensColumns[2]}, + Columns: []*schema.Column{AccessTokensColumns[9]}, RefColumns: []*schema.Column{UsersColumns[0]}, OnDelete: schema.Cascade, }, }, } + // ClustersColumns holds the columns for the "clusters" table. + ClustersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID, Unique: true, Default: "gen_random_uuid()"}, + {Name: "endpoint", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "endpoint_tls", Type: field.TypeBool, Default: true}, + {Name: "token", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + } + // ClustersTable holds the schema information for the "clusters" table. 
+ ClustersTable = &schema.Table{ + Name: "clusters", + Columns: ClustersColumns, + PrimaryKey: []*schema.Column{ClustersColumns[0]}, + } // EnvsColumns holds the columns for the "envs" table. EnvsColumns = []*schema.Column{ {Name: "id", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, @@ -38,6 +58,7 @@ var ( {Name: "build_count", Type: field.TypeInt32, Default: 1}, {Name: "spawn_count", Type: field.TypeInt64, Comment: "Number of times the env was spawned", Default: 0}, {Name: "last_spawned_at", Type: field.TypeTime, Nullable: true, Comment: "Timestamp of the last time the env was spawned"}, + {Name: "cluster_id", Type: field.TypeUUID, Nullable: true, SchemaType: map[string]string{"postgres": "uuid"}}, {Name: "team_id", Type: field.TypeUUID}, {Name: "created_by", Type: field.TypeUUID, Nullable: true}, } @@ -49,13 +70,13 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "envs_teams_envs", - Columns: []*schema.Column{EnvsColumns[7]}, + Columns: []*schema.Column{EnvsColumns[8]}, RefColumns: []*schema.Column{TeamsColumns[0]}, OnDelete: schema.NoAction, }, { Symbol: "envs_users_created_envs", - Columns: []*schema.Column{EnvsColumns[8]}, + Columns: []*schema.Column{EnvsColumns[9]}, RefColumns: []*schema.Column{UsersColumns[0]}, OnDelete: schema.SetNull, }, @@ -87,9 +108,10 @@ var ( {Name: "created_at", Type: field.TypeTime, Default: "CURRENT_TIMESTAMP"}, {Name: "updated_at", Type: field.TypeTime}, {Name: "finished_at", Type: field.TypeTime, Nullable: true}, - {Name: "status", Type: field.TypeEnum, Enums: []string{"waiting", "building", "failed", "success", "uploaded"}, Default: "waiting", SchemaType: map[string]string{"postgres": "text"}}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"waiting", "building", "snapshotting", "failed", "success", "uploaded"}, Default: "waiting", SchemaType: map[string]string{"postgres": "text"}}, {Name: "dockerfile", Type: field.TypeString, Nullable: true, SchemaType: 
map[string]string{"postgres": "text"}}, {Name: "start_cmd", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "ready_cmd", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "vcpu", Type: field.TypeInt64}, {Name: "ram_mb", Type: field.TypeInt64}, {Name: "free_disk_size_mb", Type: field.TypeInt64}, @@ -97,6 +119,7 @@ var ( {Name: "kernel_version", Type: field.TypeString, Default: "vmlinux-6.1.102", SchemaType: map[string]string{"postgres": "text"}}, {Name: "firecracker_version", Type: field.TypeString, Default: "v1.10.1_1fcdaec", SchemaType: map[string]string{"postgres": "text"}}, {Name: "envd_version", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "cluster_node_id", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "env_id", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, } // EnvBuildsTable holds the schema information for the "env_builds" table. @@ -107,7 +130,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "env_builds_envs_builds", - Columns: []*schema.Column{EnvBuildsColumns[14]}, + Columns: []*schema.Column{EnvBuildsColumns[16]}, RefColumns: []*schema.Column{EnvsColumns[0]}, OnDelete: schema.Cascade, }, @@ -120,6 +143,8 @@ var ( {Name: "base_env_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, {Name: "sandbox_id", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "metadata", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "sandbox_started_at", Type: field.TypeTime}, + {Name: "env_secure", Type: field.TypeBool, Default: false}, {Name: "env_id", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, } // SnapshotsTable holds the schema information for the "snapshots" table. 
@@ -130,7 +155,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "snapshots_envs_snapshots", - Columns: []*schema.Column{SnapshotsColumns[5]}, + Columns: []*schema.Column{SnapshotsColumns[7]}, RefColumns: []*schema.Column{EnvsColumns[0]}, OnDelete: schema.Cascade, }, @@ -145,6 +170,7 @@ var ( {Name: "blocked_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "name", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, {Name: "email", Type: field.TypeString, Size: 255, SchemaType: map[string]string{"postgres": "character varying(255)"}}, + {Name: "cluster_id", Type: field.TypeUUID, Nullable: true, SchemaType: map[string]string{"postgres": "uuid"}}, {Name: "tier", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, } // TeamsTable holds the schema information for the "teams" table. @@ -155,7 +181,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "teams_tiers_teams", - Columns: []*schema.Column{TeamsColumns[7]}, + Columns: []*schema.Column{TeamsColumns[8]}, RefColumns: []*schema.Column{TiersColumns[0]}, OnDelete: schema.NoAction, }, @@ -165,6 +191,11 @@ var ( TeamAPIKeysColumns = []*schema.Column{ {Name: "id", Type: field.TypeUUID, Unique: true, Default: "gen_random_uuid()"}, {Name: "api_key", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "character varying(44)"}}, + {Name: "api_key_hash", Type: field.TypeString, Unique: true, SchemaType: map[string]string{"postgres": "character varying(64)"}}, + {Name: "api_key_prefix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(10)"}}, + {Name: "api_key_length", Type: field.TypeInt}, + {Name: "api_key_mask_prefix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(5)"}}, + {Name: "api_key_mask_suffix", Type: field.TypeString, SchemaType: map[string]string{"postgres": "character varying(5)"}}, {Name: 
"created_at", Type: field.TypeTime, Default: "CURRENT_TIMESTAMP"}, {Name: "updated_at", Type: field.TypeTime, Nullable: true}, {Name: "name", Type: field.TypeString, Default: "Unnamed API Key", SchemaType: map[string]string{"postgres": "text"}}, @@ -180,13 +211,13 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "team_api_keys_teams_team_api_keys", - Columns: []*schema.Column{TeamAPIKeysColumns[6]}, + Columns: []*schema.Column{TeamAPIKeysColumns[11]}, RefColumns: []*schema.Column{TeamsColumns[0]}, OnDelete: schema.Cascade, }, { Symbol: "team_api_keys_users_created_api_keys", - Columns: []*schema.Column{TeamAPIKeysColumns[7]}, + Columns: []*schema.Column{TeamAPIKeysColumns[12]}, RefColumns: []*schema.Column{UsersColumns[0]}, OnDelete: schema.SetNull, }, @@ -254,6 +285,7 @@ var ( // Tables holds all the tables in the schema. Tables = []*schema.Table{ AccessTokensTable, + ClustersTable, EnvsTable, EnvAliasesTable, EnvBuildsTable, @@ -269,6 +301,7 @@ var ( func init() { AccessTokensTable.ForeignKeys[0].RefTable = UsersTable AccessTokensTable.Annotation = &entsql.Annotation{} + ClustersTable.Annotation = &entsql.Annotation{} EnvsTable.ForeignKeys[0].RefTable = TeamsTable EnvsTable.ForeignKeys[1].RefTable = UsersTable EnvsTable.Annotation = &entsql.Annotation{} diff --git a/packages/shared/pkg/models/mutation.go b/packages/shared/pkg/models/mutation.go index 6314253..77a7dfa 100644 --- a/packages/shared/pkg/models/mutation.go +++ b/packages/shared/pkg/models/mutation.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" "github.com/e2b-dev/infra/packages/shared/pkg/models/env" "github.com/e2b-dev/infra/packages/shared/pkg/models/envalias" "github.com/e2b-dev/infra/packages/shared/pkg/models/envbuild" @@ -35,6 +36,7 @@ const ( // Node types. 
TypeAccessToken = "AccessToken" + TypeCluster = "Cluster" TypeEnv = "Env" TypeEnvAlias = "EnvAlias" TypeEnvBuild = "EnvBuild" @@ -49,16 +51,24 @@ const ( // AccessTokenMutation represents an operation that mutates the AccessToken nodes in the graph. type AccessTokenMutation struct { config - op Op - typ string - id *string - created_at *time.Time - clearedFields map[string]struct{} - user *uuid.UUID - cleareduser bool - done bool - oldValue func(context.Context) (*AccessToken, error) - predicates []predicate.AccessToken + op Op + typ string + id *uuid.UUID + access_token *string + access_token_hash *string + access_token_prefix *string + access_token_length *int + addaccess_token_length *int + access_token_mask_prefix *string + access_token_mask_suffix *string + name *string + created_at *time.Time + clearedFields map[string]struct{} + user *uuid.UUID + cleareduser bool + done bool + oldValue func(context.Context) (*AccessToken, error) + predicates []predicate.AccessToken } var _ ent.Mutation = (*AccessTokenMutation)(nil) @@ -81,7 +91,7 @@ func newAccessTokenMutation(c config, op Op, opts ...accesstokenOption) *AccessT } // withAccessTokenID sets the ID field of the mutation. -func withAccessTokenID(id string) accesstokenOption { +func withAccessTokenID(id uuid.UUID) accesstokenOption { return func(m *AccessTokenMutation) { var ( err error @@ -133,13 +143,13 @@ func (m AccessTokenMutation) Tx() (*Tx, error) { // SetID sets the value of the id field. Note that this // operation is only accepted on creation of AccessToken entities. -func (m *AccessTokenMutation) SetID(id string) { +func (m *AccessTokenMutation) SetID(id uuid.UUID) { m.id = &id } // ID returns the ID value in the mutation. Note that the ID is only available // if it was provided to the builder or after it was returned from the database. 
-func (m *AccessTokenMutation) ID() (id string, exists bool) { +func (m *AccessTokenMutation) ID() (id uuid.UUID, exists bool) { if m.id == nil { return } @@ -150,12 +160,12 @@ func (m *AccessTokenMutation) ID() (id string, exists bool) { // That means, if the mutation is applied within a transaction with an isolation level such // as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated // or updated by the mutation. -func (m *AccessTokenMutation) IDs(ctx context.Context) ([]string, error) { +func (m *AccessTokenMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { switch { case m.op.Is(OpUpdateOne | OpDeleteOne): id, exists := m.ID() if exists { - return []string{id}, nil + return []uuid.UUID{id}, nil } fallthrough case m.op.Is(OpUpdate | OpDelete): @@ -165,6 +175,278 @@ func (m *AccessTokenMutation) IDs(ctx context.Context) ([]string, error) { } } +// SetAccessToken sets the "access_token" field. +func (m *AccessTokenMutation) SetAccessToken(s string) { + m.access_token = &s +} + +// AccessToken returns the value of the "access_token" field in the mutation. +func (m *AccessTokenMutation) AccessToken() (r string, exists bool) { + v := m.access_token + if v == nil { + return + } + return *v, true +} + +// OldAccessToken returns the old "access_token" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccessTokenMutation) OldAccessToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessToken: %w", err) + } + return oldValue.AccessToken, nil +} + +// ResetAccessToken resets all changes to the "access_token" field. +func (m *AccessTokenMutation) ResetAccessToken() { + m.access_token = nil +} + +// SetAccessTokenHash sets the "access_token_hash" field. +func (m *AccessTokenMutation) SetAccessTokenHash(s string) { + m.access_token_hash = &s +} + +// AccessTokenHash returns the value of the "access_token_hash" field in the mutation. +func (m *AccessTokenMutation) AccessTokenHash() (r string, exists bool) { + v := m.access_token_hash + if v == nil { + return + } + return *v, true +} + +// OldAccessTokenHash returns the old "access_token_hash" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccessTokenMutation) OldAccessTokenHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessTokenHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessTokenHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessTokenHash: %w", err) + } + return oldValue.AccessTokenHash, nil +} + +// ResetAccessTokenHash resets all changes to the "access_token_hash" field. 
+func (m *AccessTokenMutation) ResetAccessTokenHash() { + m.access_token_hash = nil +} + +// SetAccessTokenPrefix sets the "access_token_prefix" field. +func (m *AccessTokenMutation) SetAccessTokenPrefix(s string) { + m.access_token_prefix = &s +} + +// AccessTokenPrefix returns the value of the "access_token_prefix" field in the mutation. +func (m *AccessTokenMutation) AccessTokenPrefix() (r string, exists bool) { + v := m.access_token_prefix + if v == nil { + return + } + return *v, true +} + +// OldAccessTokenPrefix returns the old "access_token_prefix" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccessTokenMutation) OldAccessTokenPrefix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessTokenPrefix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessTokenPrefix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessTokenPrefix: %w", err) + } + return oldValue.AccessTokenPrefix, nil +} + +// ResetAccessTokenPrefix resets all changes to the "access_token_prefix" field. +func (m *AccessTokenMutation) ResetAccessTokenPrefix() { + m.access_token_prefix = nil +} + +// SetAccessTokenLength sets the "access_token_length" field. +func (m *AccessTokenMutation) SetAccessTokenLength(i int) { + m.access_token_length = &i + m.addaccess_token_length = nil +} + +// AccessTokenLength returns the value of the "access_token_length" field in the mutation. 
+func (m *AccessTokenMutation) AccessTokenLength() (r int, exists bool) { + v := m.access_token_length + if v == nil { + return + } + return *v, true +} + +// OldAccessTokenLength returns the old "access_token_length" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccessTokenMutation) OldAccessTokenLength(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessTokenLength is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessTokenLength requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessTokenLength: %w", err) + } + return oldValue.AccessTokenLength, nil +} + +// AddAccessTokenLength adds i to the "access_token_length" field. +func (m *AccessTokenMutation) AddAccessTokenLength(i int) { + if m.addaccess_token_length != nil { + *m.addaccess_token_length += i + } else { + m.addaccess_token_length = &i + } +} + +// AddedAccessTokenLength returns the value that was added to the "access_token_length" field in this mutation. +func (m *AccessTokenMutation) AddedAccessTokenLength() (r int, exists bool) { + v := m.addaccess_token_length + if v == nil { + return + } + return *v, true +} + +// ResetAccessTokenLength resets all changes to the "access_token_length" field. +func (m *AccessTokenMutation) ResetAccessTokenLength() { + m.access_token_length = nil + m.addaccess_token_length = nil +} + +// SetAccessTokenMaskPrefix sets the "access_token_mask_prefix" field. 
+func (m *AccessTokenMutation) SetAccessTokenMaskPrefix(s string) { + m.access_token_mask_prefix = &s +} + +// AccessTokenMaskPrefix returns the value of the "access_token_mask_prefix" field in the mutation. +func (m *AccessTokenMutation) AccessTokenMaskPrefix() (r string, exists bool) { + v := m.access_token_mask_prefix + if v == nil { + return + } + return *v, true +} + +// OldAccessTokenMaskPrefix returns the old "access_token_mask_prefix" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccessTokenMutation) OldAccessTokenMaskPrefix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessTokenMaskPrefix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessTokenMaskPrefix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessTokenMaskPrefix: %w", err) + } + return oldValue.AccessTokenMaskPrefix, nil +} + +// ResetAccessTokenMaskPrefix resets all changes to the "access_token_mask_prefix" field. +func (m *AccessTokenMutation) ResetAccessTokenMaskPrefix() { + m.access_token_mask_prefix = nil +} + +// SetAccessTokenMaskSuffix sets the "access_token_mask_suffix" field. +func (m *AccessTokenMutation) SetAccessTokenMaskSuffix(s string) { + m.access_token_mask_suffix = &s +} + +// AccessTokenMaskSuffix returns the value of the "access_token_mask_suffix" field in the mutation. +func (m *AccessTokenMutation) AccessTokenMaskSuffix() (r string, exists bool) { + v := m.access_token_mask_suffix + if v == nil { + return + } + return *v, true +} + +// OldAccessTokenMaskSuffix returns the old "access_token_mask_suffix" field's value of the AccessToken entity. 
+// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccessTokenMutation) OldAccessTokenMaskSuffix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessTokenMaskSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessTokenMaskSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessTokenMaskSuffix: %w", err) + } + return oldValue.AccessTokenMaskSuffix, nil +} + +// ResetAccessTokenMaskSuffix resets all changes to the "access_token_mask_suffix" field. +func (m *AccessTokenMutation) ResetAccessTokenMaskSuffix() { + m.access_token_mask_suffix = nil +} + +// SetName sets the "name" field. +func (m *AccessTokenMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *AccessTokenMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the AccessToken entity. +// If the AccessToken object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccessTokenMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *AccessTokenMutation) ResetName() { + m.name = nil +} + // SetUserID sets the "user_id" field. func (m *AccessTokenMutation) SetUserID(u uuid.UUID) { m.user = &u @@ -250,42 +532,634 @@ func (m *AccessTokenMutation) ResetCreatedAt() { delete(m.clearedFields, accesstoken.FieldCreatedAt) } -// ClearUser clears the "user" edge to the User entity. -func (m *AccessTokenMutation) ClearUser() { - m.cleareduser = true - m.clearedFields[accesstoken.FieldUserID] = struct{}{} +// ClearUser clears the "user" edge to the User entity. +func (m *AccessTokenMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[accesstoken.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *AccessTokenMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *AccessTokenMutation) UserIDs() (ids []uuid.UUID) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *AccessTokenMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the AccessTokenMutation builder. 
+func (m *AccessTokenMutation) Where(ps ...predicate.AccessToken) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AccessTokenMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AccessTokenMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AccessToken, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AccessTokenMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AccessTokenMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AccessToken). +func (m *AccessTokenMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *AccessTokenMutation) Fields() []string { + fields := make([]string, 0, 9) + if m.access_token != nil { + fields = append(fields, accesstoken.FieldAccessToken) + } + if m.access_token_hash != nil { + fields = append(fields, accesstoken.FieldAccessTokenHash) + } + if m.access_token_prefix != nil { + fields = append(fields, accesstoken.FieldAccessTokenPrefix) + } + if m.access_token_length != nil { + fields = append(fields, accesstoken.FieldAccessTokenLength) + } + if m.access_token_mask_prefix != nil { + fields = append(fields, accesstoken.FieldAccessTokenMaskPrefix) + } + if m.access_token_mask_suffix != nil { + fields = append(fields, accesstoken.FieldAccessTokenMaskSuffix) + } + if m.name != nil { + fields = append(fields, accesstoken.FieldName) + } + if m.user != nil { + fields = append(fields, accesstoken.FieldUserID) + } + if m.created_at != nil { + fields = append(fields, accesstoken.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AccessTokenMutation) Field(name string) (ent.Value, bool) { + switch name { + case accesstoken.FieldAccessToken: + return m.AccessToken() + case accesstoken.FieldAccessTokenHash: + return m.AccessTokenHash() + case accesstoken.FieldAccessTokenPrefix: + return m.AccessTokenPrefix() + case accesstoken.FieldAccessTokenLength: + return m.AccessTokenLength() + case accesstoken.FieldAccessTokenMaskPrefix: + return m.AccessTokenMaskPrefix() + case accesstoken.FieldAccessTokenMaskSuffix: + return m.AccessTokenMaskSuffix() + case accesstoken.FieldName: + return m.Name() + case accesstoken.FieldUserID: + return m.UserID() + case accesstoken.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. 
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AccessTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case accesstoken.FieldAccessToken: + return m.OldAccessToken(ctx) + case accesstoken.FieldAccessTokenHash: + return m.OldAccessTokenHash(ctx) + case accesstoken.FieldAccessTokenPrefix: + return m.OldAccessTokenPrefix(ctx) + case accesstoken.FieldAccessTokenLength: + return m.OldAccessTokenLength(ctx) + case accesstoken.FieldAccessTokenMaskPrefix: + return m.OldAccessTokenMaskPrefix(ctx) + case accesstoken.FieldAccessTokenMaskSuffix: + return m.OldAccessTokenMaskSuffix(ctx) + case accesstoken.FieldName: + return m.OldName(ctx) + case accesstoken.FieldUserID: + return m.OldUserID(ctx) + case accesstoken.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown AccessToken field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AccessTokenMutation) SetField(name string, value ent.Value) error { + switch name { + case accesstoken.FieldAccessToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessToken(v) + return nil + case accesstoken.FieldAccessTokenHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessTokenHash(v) + return nil + case accesstoken.FieldAccessTokenPrefix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessTokenPrefix(v) + return nil + case accesstoken.FieldAccessTokenLength: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessTokenLength(v) + return nil + case accesstoken.FieldAccessTokenMaskPrefix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessTokenMaskPrefix(v) + return nil + case accesstoken.FieldAccessTokenMaskSuffix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessTokenMaskSuffix(v) + return nil + case accesstoken.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case accesstoken.FieldUserID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case accesstoken.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown AccessToken field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *AccessTokenMutation) AddedFields() []string { + var fields []string + if m.addaccess_token_length != nil { + fields = append(fields, accesstoken.FieldAccessTokenLength) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AccessTokenMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case accesstoken.FieldAccessTokenLength: + return m.AddedAccessTokenLength() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccessTokenMutation) AddField(name string, value ent.Value) error { + switch name { + case accesstoken.FieldAccessTokenLength: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddAccessTokenLength(v) + return nil + } + return fmt.Errorf("unknown AccessToken numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AccessTokenMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(accesstoken.FieldCreatedAt) { + fields = append(fields, accesstoken.FieldCreatedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AccessTokenMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *AccessTokenMutation) ClearField(name string) error { + switch name { + case accesstoken.FieldCreatedAt: + m.ClearCreatedAt() + return nil + } + return fmt.Errorf("unknown AccessToken nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AccessTokenMutation) ResetField(name string) error { + switch name { + case accesstoken.FieldAccessToken: + m.ResetAccessToken() + return nil + case accesstoken.FieldAccessTokenHash: + m.ResetAccessTokenHash() + return nil + case accesstoken.FieldAccessTokenPrefix: + m.ResetAccessTokenPrefix() + return nil + case accesstoken.FieldAccessTokenLength: + m.ResetAccessTokenLength() + return nil + case accesstoken.FieldAccessTokenMaskPrefix: + m.ResetAccessTokenMaskPrefix() + return nil + case accesstoken.FieldAccessTokenMaskSuffix: + m.ResetAccessTokenMaskSuffix() + return nil + case accesstoken.FieldName: + m.ResetName() + return nil + case accesstoken.FieldUserID: + m.ResetUserID() + return nil + case accesstoken.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown AccessToken field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AccessTokenMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.user != nil { + edges = append(edges, accesstoken.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AccessTokenMutation) AddedIDs(name string) []ent.Value { + switch name { + case accesstoken.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *AccessTokenMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AccessTokenMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AccessTokenMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.cleareduser { + edges = append(edges, accesstoken.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AccessTokenMutation) EdgeCleared(name string) bool { + switch name { + case accesstoken.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AccessTokenMutation) ClearEdge(name string) error { + switch name { + case accesstoken.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown AccessToken unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AccessTokenMutation) ResetEdge(name string) error { + switch name { + case accesstoken.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown AccessToken edge %s", name) +} + +// ClusterMutation represents an operation that mutates the Cluster nodes in the graph. 
+type ClusterMutation struct { + config + op Op + typ string + id *uuid.UUID + endpoint *string + endpoint_tls *bool + token *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Cluster, error) + predicates []predicate.Cluster +} + +var _ ent.Mutation = (*ClusterMutation)(nil) + +// clusterOption allows management of the mutation configuration using functional options. +type clusterOption func(*ClusterMutation) + +// newClusterMutation creates new mutation for the Cluster entity. +func newClusterMutation(c config, op Op, opts ...clusterOption) *ClusterMutation { + m := &ClusterMutation{ + config: c, + op: op, + typ: TypeCluster, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withClusterID sets the ID field of the mutation. +func withClusterID(id uuid.UUID) clusterOption { + return func(m *ClusterMutation) { + var ( + err error + once sync.Once + value *Cluster + ) + m.oldValue = func(ctx context.Context) (*Cluster, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Cluster.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withCluster sets the old Cluster of the mutation. +func withCluster(node *Cluster) clusterOption { + return func(m *ClusterMutation) { + m.oldValue = func(context.Context) (*Cluster, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ClusterMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m ClusterMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("models: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Cluster entities. +func (m *ClusterMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ClusterMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ClusterMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Cluster.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetEndpoint sets the "endpoint" field. +func (m *ClusterMutation) SetEndpoint(s string) { + m.endpoint = &s +} + +// Endpoint returns the value of the "endpoint" field in the mutation. +func (m *ClusterMutation) Endpoint() (r string, exists bool) { + v := m.endpoint + if v == nil { + return + } + return *v, true +} + +// OldEndpoint returns the old "endpoint" field's value of the Cluster entity. +// If the Cluster object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ClusterMutation) OldEndpoint(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndpoint is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndpoint requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndpoint: %w", err) + } + return oldValue.Endpoint, nil +} + +// ResetEndpoint resets all changes to the "endpoint" field. +func (m *ClusterMutation) ResetEndpoint() { + m.endpoint = nil +} + +// SetEndpointTLS sets the "endpoint_tls" field. +func (m *ClusterMutation) SetEndpointTLS(b bool) { + m.endpoint_tls = &b +} + +// EndpointTLS returns the value of the "endpoint_tls" field in the mutation. +func (m *ClusterMutation) EndpointTLS() (r bool, exists bool) { + v := m.endpoint_tls + if v == nil { + return + } + return *v, true +} + +// OldEndpointTLS returns the old "endpoint_tls" field's value of the Cluster entity. +// If the Cluster object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ClusterMutation) OldEndpointTLS(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndpointTLS is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndpointTLS requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndpointTLS: %w", err) + } + return oldValue.EndpointTLS, nil +} + +// ResetEndpointTLS resets all changes to the "endpoint_tls" field. 
+func (m *ClusterMutation) ResetEndpointTLS() { + m.endpoint_tls = nil } -// UserCleared reports if the "user" edge to the User entity was cleared. -func (m *AccessTokenMutation) UserCleared() bool { - return m.cleareduser +// SetToken sets the "token" field. +func (m *ClusterMutation) SetToken(s string) { + m.token = &s } -// UserIDs returns the "user" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// UserID instead. It exists only for internal usage by the builders. -func (m *AccessTokenMutation) UserIDs() (ids []uuid.UUID) { - if id := m.user; id != nil { - ids = append(ids, *id) +// Token returns the value of the "token" field in the mutation. +func (m *ClusterMutation) Token() (r string, exists bool) { + v := m.token + if v == nil { + return } - return + return *v, true } -// ResetUser resets all changes to the "user" edge. -func (m *AccessTokenMutation) ResetUser() { - m.user = nil - m.cleareduser = false +// OldToken returns the old "token" field's value of the Cluster entity. +// If the Cluster object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ClusterMutation) OldToken(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldToken is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldToken requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldToken: %w", err) + } + return oldValue.Token, nil } -// Where appends a list predicates to the AccessTokenMutation builder. -func (m *AccessTokenMutation) Where(ps ...predicate.AccessToken) { +// ResetToken resets all changes to the "token" field. 
+func (m *ClusterMutation) ResetToken() { + m.token = nil +} + +// Where appends a list predicates to the ClusterMutation builder. +func (m *ClusterMutation) Where(ps ...predicate.Cluster) { m.predicates = append(m.predicates, ps...) } -// WhereP appends storage-level predicates to the AccessTokenMutation builder. Using this method, +// WhereP appends storage-level predicates to the ClusterMutation builder. Using this method, // users can use type-assertion to append predicates that do not depend on any generated package. -func (m *AccessTokenMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.AccessToken, len(ps)) +func (m *ClusterMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Cluster, len(ps)) for i := range ps { p[i] = ps[i] } @@ -293,30 +1167,33 @@ func (m *AccessTokenMutation) WhereP(ps ...func(*sql.Selector)) { } // Op returns the operation name. -func (m *AccessTokenMutation) Op() Op { +func (m *ClusterMutation) Op() Op { return m.op } // SetOp allows setting the mutation operation. -func (m *AccessTokenMutation) SetOp(op Op) { +func (m *ClusterMutation) SetOp(op Op) { m.op = op } -// Type returns the node type of this mutation (AccessToken). -func (m *AccessTokenMutation) Type() string { +// Type returns the node type of this mutation (Cluster). +func (m *ClusterMutation) Type() string { return m.typ } // Fields returns all fields that were changed during this mutation. Note that in // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
-func (m *AccessTokenMutation) Fields() []string { - fields := make([]string, 0, 2) - if m.user != nil { - fields = append(fields, accesstoken.FieldUserID) +func (m *ClusterMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.endpoint != nil { + fields = append(fields, cluster.FieldEndpoint) } - if m.created_at != nil { - fields = append(fields, accesstoken.FieldCreatedAt) + if m.endpoint_tls != nil { + fields = append(fields, cluster.FieldEndpointTLS) + } + if m.token != nil { + fields = append(fields, cluster.FieldToken) } return fields } @@ -324,12 +1201,14 @@ func (m *AccessTokenMutation) Fields() []string { // Field returns the value of a field with the given name. The second boolean // return value indicates that this field was not set, or was not defined in the // schema. -func (m *AccessTokenMutation) Field(name string) (ent.Value, bool) { +func (m *ClusterMutation) Field(name string) (ent.Value, bool) { switch name { - case accesstoken.FieldUserID: - return m.UserID() - case accesstoken.FieldCreatedAt: - return m.CreatedAt() + case cluster.FieldEndpoint: + return m.Endpoint() + case cluster.FieldEndpointTLS: + return m.EndpointTLS() + case cluster.FieldToken: + return m.Token() } return nil, false } @@ -337,175 +1216,152 @@ func (m *AccessTokenMutation) Field(name string) (ent.Value, bool) { // OldField returns the old value of the field from the database. An error is // returned if the mutation operation is not UpdateOne, or the query to the // database failed. 
-func (m *AccessTokenMutation) OldField(ctx context.Context, name string) (ent.Value, error) { +func (m *ClusterMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { - case accesstoken.FieldUserID: - return m.OldUserID(ctx) - case accesstoken.FieldCreatedAt: - return m.OldCreatedAt(ctx) + case cluster.FieldEndpoint: + return m.OldEndpoint(ctx) + case cluster.FieldEndpointTLS: + return m.OldEndpointTLS(ctx) + case cluster.FieldToken: + return m.OldToken(ctx) } - return nil, fmt.Errorf("unknown AccessToken field %s", name) + return nil, fmt.Errorf("unknown Cluster field %s", name) } // SetField sets the value of a field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *AccessTokenMutation) SetField(name string, value ent.Value) error { +func (m *ClusterMutation) SetField(name string, value ent.Value) error { switch name { - case accesstoken.FieldUserID: - v, ok := value.(uuid.UUID) + case cluster.FieldEndpoint: + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetUserID(v) + m.SetEndpoint(v) return nil - case accesstoken.FieldCreatedAt: - v, ok := value.(time.Time) + case cluster.FieldEndpointTLS: + v, ok := value.(bool) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCreatedAt(v) + m.SetEndpointTLS(v) + return nil + case cluster.FieldToken: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetToken(v) return nil } - return fmt.Errorf("unknown AccessToken field %s", name) + return fmt.Errorf("unknown Cluster field %s", name) } // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. 
-func (m *AccessTokenMutation) AddedFields() []string { +func (m *ClusterMutation) AddedFields() []string { return nil } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. -func (m *AccessTokenMutation) AddedField(name string) (ent.Value, bool) { +func (m *ClusterMutation) AddedField(name string) (ent.Value, bool) { return nil, false } // AddField adds the value to the field with the given name. It returns an error if // the field is not defined in the schema, or if the type mismatched the field // type. -func (m *AccessTokenMutation) AddField(name string, value ent.Value) error { +func (m *ClusterMutation) AddField(name string, value ent.Value) error { switch name { } - return fmt.Errorf("unknown AccessToken numeric field %s", name) + return fmt.Errorf("unknown Cluster numeric field %s", name) } // ClearedFields returns all nullable fields that were cleared during this // mutation. -func (m *AccessTokenMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(accesstoken.FieldCreatedAt) { - fields = append(fields, accesstoken.FieldCreatedAt) - } - return fields +func (m *ClusterMutation) ClearedFields() []string { + return nil } // FieldCleared returns a boolean indicating if a field with the given name was // cleared in this mutation. -func (m *AccessTokenMutation) FieldCleared(name string) bool { +func (m *ClusterMutation) FieldCleared(name string) bool { _, ok := m.clearedFields[name] return ok } // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. 
-func (m *AccessTokenMutation) ClearField(name string) error { - switch name { - case accesstoken.FieldCreatedAt: - m.ClearCreatedAt() - return nil - } - return fmt.Errorf("unknown AccessToken nullable field %s", name) +func (m *ClusterMutation) ClearField(name string) error { + return fmt.Errorf("unknown Cluster nullable field %s", name) } // ResetField resets all changes in the mutation for the field with the given name. // It returns an error if the field is not defined in the schema. -func (m *AccessTokenMutation) ResetField(name string) error { +func (m *ClusterMutation) ResetField(name string) error { switch name { - case accesstoken.FieldUserID: - m.ResetUserID() + case cluster.FieldEndpoint: + m.ResetEndpoint() return nil - case accesstoken.FieldCreatedAt: - m.ResetCreatedAt() + case cluster.FieldEndpointTLS: + m.ResetEndpointTLS() + return nil + case cluster.FieldToken: + m.ResetToken() return nil } - return fmt.Errorf("unknown AccessToken field %s", name) + return fmt.Errorf("unknown Cluster field %s", name) } // AddedEdges returns all edge names that were set/added in this mutation. -func (m *AccessTokenMutation) AddedEdges() []string { - edges := make([]string, 0, 1) - if m.user != nil { - edges = append(edges, accesstoken.EdgeUser) - } +func (m *ClusterMutation) AddedEdges() []string { + edges := make([]string, 0, 0) return edges } // AddedIDs returns all IDs (to other nodes) that were added for the given edge // name in this mutation. -func (m *AccessTokenMutation) AddedIDs(name string) []ent.Value { - switch name { - case accesstoken.EdgeUser: - if id := m.user; id != nil { - return []ent.Value{*id} - } - } +func (m *ClusterMutation) AddedIDs(name string) []ent.Value { return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
-func (m *AccessTokenMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) +func (m *ClusterMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) return edges } // RemovedIDs returns all IDs (to other nodes) that were removed for the edge with // the given name in this mutation. -func (m *AccessTokenMutation) RemovedIDs(name string) []ent.Value { +func (m *ClusterMutation) RemovedIDs(name string) []ent.Value { return nil } // ClearedEdges returns all edge names that were cleared in this mutation. -func (m *AccessTokenMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) - if m.cleareduser { - edges = append(edges, accesstoken.EdgeUser) - } +func (m *ClusterMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) return edges } // EdgeCleared returns a boolean which indicates if the edge with the given name // was cleared in this mutation. -func (m *AccessTokenMutation) EdgeCleared(name string) bool { - switch name { - case accesstoken.EdgeUser: - return m.cleareduser - } +func (m *ClusterMutation) EdgeCleared(name string) bool { return false } // ClearEdge clears the value of the edge with the given name. It returns an error // if that edge is not defined in the schema. -func (m *AccessTokenMutation) ClearEdge(name string) error { - switch name { - case accesstoken.EdgeUser: - m.ClearUser() - return nil - } - return fmt.Errorf("unknown AccessToken unique edge %s", name) +func (m *ClusterMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Cluster unique edge %s", name) } // ResetEdge resets all changes to the edge with the given name in this mutation. // It returns an error if the edge is not defined in the schema. 
-func (m *AccessTokenMutation) ResetEdge(name string) error { - switch name { - case accesstoken.EdgeUser: - m.ResetUser() - return nil - } - return fmt.Errorf("unknown AccessToken edge %s", name) +func (m *ClusterMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Cluster edge %s", name) } // EnvMutation represents an operation that mutates the Env nodes in the graph. @@ -522,6 +1378,7 @@ type EnvMutation struct { spawn_count *int64 addspawn_count *int64 last_spawned_at *time.Time + cluster_id *uuid.UUID clearedFields map[string]struct{} team *uuid.UUID clearedteam bool @@ -999,6 +1856,55 @@ func (m *EnvMutation) ResetLastSpawnedAt() { delete(m.clearedFields, env.FieldLastSpawnedAt) } +// SetClusterID sets the "cluster_id" field. +func (m *EnvMutation) SetClusterID(u uuid.UUID) { + m.cluster_id = &u +} + +// ClusterID returns the value of the "cluster_id" field in the mutation. +func (m *EnvMutation) ClusterID() (r uuid.UUID, exists bool) { + v := m.cluster_id + if v == nil { + return + } + return *v, true +} + +// OldClusterID returns the old "cluster_id" field's value of the Env entity. +// If the Env object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *EnvMutation) OldClusterID(ctx context.Context) (v *uuid.UUID, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClusterID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClusterID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClusterID: %w", err) + } + return oldValue.ClusterID, nil +} + +// ClearClusterID clears the value of the "cluster_id" field. 
+func (m *EnvMutation) ClearClusterID() { + m.cluster_id = nil + m.clearedFields[env.FieldClusterID] = struct{}{} +} + +// ClusterIDCleared returns if the "cluster_id" field was cleared in this mutation. +func (m *EnvMutation) ClusterIDCleared() bool { + _, ok := m.clearedFields[env.FieldClusterID] + return ok +} + +// ResetClusterID resets all changes to the "cluster_id" field. +func (m *EnvMutation) ResetClusterID() { + m.cluster_id = nil + delete(m.clearedFields, env.FieldClusterID) +} + // ClearTeam clears the "team" edge to the Team entity. func (m *EnvMutation) ClearTeam() { m.clearedteam = true @@ -1262,7 +2168,7 @@ func (m *EnvMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *EnvMutation) Fields() []string { - fields := make([]string, 0, 8) + fields := make([]string, 0, 9) if m.created_at != nil { fields = append(fields, env.FieldCreatedAt) } @@ -1287,6 +2193,9 @@ func (m *EnvMutation) Fields() []string { if m.last_spawned_at != nil { fields = append(fields, env.FieldLastSpawnedAt) } + if m.cluster_id != nil { + fields = append(fields, env.FieldClusterID) + } return fields } @@ -1311,6 +2220,8 @@ func (m *EnvMutation) Field(name string) (ent.Value, bool) { return m.SpawnCount() case env.FieldLastSpawnedAt: return m.LastSpawnedAt() + case env.FieldClusterID: + return m.ClusterID() } return nil, false } @@ -1336,6 +2247,8 @@ func (m *EnvMutation) OldField(ctx context.Context, name string) (ent.Value, err return m.OldSpawnCount(ctx) case env.FieldLastSpawnedAt: return m.OldLastSpawnedAt(ctx) + case env.FieldClusterID: + return m.OldClusterID(ctx) } return nil, fmt.Errorf("unknown Env field %s", name) } @@ -1401,6 +2314,13 @@ func (m *EnvMutation) SetField(name string, value ent.Value) error { } m.SetLastSpawnedAt(v) return nil + case env.FieldClusterID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClusterID(v) 
+ return nil } return fmt.Errorf("unknown Env field %s", name) } @@ -1464,6 +2384,9 @@ func (m *EnvMutation) ClearedFields() []string { if m.FieldCleared(env.FieldLastSpawnedAt) { fields = append(fields, env.FieldLastSpawnedAt) } + if m.FieldCleared(env.FieldClusterID) { + fields = append(fields, env.FieldClusterID) + } return fields } @@ -1484,6 +2407,9 @@ func (m *EnvMutation) ClearField(name string) error { case env.FieldLastSpawnedAt: m.ClearLastSpawnedAt() return nil + case env.FieldClusterID: + m.ClearClusterID() + return nil } return fmt.Errorf("unknown Env nullable field %s", name) } @@ -1516,6 +2442,9 @@ func (m *EnvMutation) ResetField(name string) error { case env.FieldLastSpawnedAt: m.ResetLastSpawnedAt() return nil + case env.FieldClusterID: + m.ResetClusterID() + return nil } return fmt.Errorf("unknown Env field %s", name) } @@ -2144,6 +3073,7 @@ type EnvBuildMutation struct { status *envbuild.Status dockerfile *string start_cmd *string + ready_cmd *string vcpu *int64 addvcpu *int64 ram_mb *int64 @@ -2155,6 +3085,7 @@ type EnvBuildMutation struct { kernel_version *string firecracker_version *string envd_version *string + cluster_node_id *string clearedFields map[string]struct{} env *string clearedenv bool @@ -2571,6 +3502,55 @@ func (m *EnvBuildMutation) ResetStartCmd() { delete(m.clearedFields, envbuild.FieldStartCmd) } +// SetReadyCmd sets the "ready_cmd" field. +func (m *EnvBuildMutation) SetReadyCmd(s string) { + m.ready_cmd = &s +} + +// ReadyCmd returns the value of the "ready_cmd" field in the mutation. +func (m *EnvBuildMutation) ReadyCmd() (r string, exists bool) { + v := m.ready_cmd + if v == nil { + return + } + return *v, true +} + +// OldReadyCmd returns the old "ready_cmd" field's value of the EnvBuild entity. +// If the EnvBuild object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *EnvBuildMutation) OldReadyCmd(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldReadyCmd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldReadyCmd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldReadyCmd: %w", err) + } + return oldValue.ReadyCmd, nil +} + +// ClearReadyCmd clears the value of the "ready_cmd" field. +func (m *EnvBuildMutation) ClearReadyCmd() { + m.ready_cmd = nil + m.clearedFields[envbuild.FieldReadyCmd] = struct{}{} +} + +// ReadyCmdCleared returns if the "ready_cmd" field was cleared in this mutation. +func (m *EnvBuildMutation) ReadyCmdCleared() bool { + _, ok := m.clearedFields[envbuild.FieldReadyCmd] + return ok +} + +// ResetReadyCmd resets all changes to the "ready_cmd" field. +func (m *EnvBuildMutation) ResetReadyCmd() { + m.ready_cmd = nil + delete(m.clearedFields, envbuild.FieldReadyCmd) +} + // SetVcpu sets the "vcpu" field. func (m *EnvBuildMutation) SetVcpu(i int64) { m.vcpu = &i @@ -2930,6 +3910,55 @@ func (m *EnvBuildMutation) ResetEnvdVersion() { delete(m.clearedFields, envbuild.FieldEnvdVersion) } +// SetClusterNodeID sets the "cluster_node_id" field. +func (m *EnvBuildMutation) SetClusterNodeID(s string) { + m.cluster_node_id = &s +} + +// ClusterNodeID returns the value of the "cluster_node_id" field in the mutation. +func (m *EnvBuildMutation) ClusterNodeID() (r string, exists bool) { + v := m.cluster_node_id + if v == nil { + return + } + return *v, true +} + +// OldClusterNodeID returns the old "cluster_node_id" field's value of the EnvBuild entity. +// If the EnvBuild object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *EnvBuildMutation) OldClusterNodeID(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClusterNodeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClusterNodeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClusterNodeID: %w", err) + } + return oldValue.ClusterNodeID, nil +} + +// ClearClusterNodeID clears the value of the "cluster_node_id" field. +func (m *EnvBuildMutation) ClearClusterNodeID() { + m.cluster_node_id = nil + m.clearedFields[envbuild.FieldClusterNodeID] = struct{}{} +} + +// ClusterNodeIDCleared returns if the "cluster_node_id" field was cleared in this mutation. +func (m *EnvBuildMutation) ClusterNodeIDCleared() bool { + _, ok := m.clearedFields[envbuild.FieldClusterNodeID] + return ok +} + +// ResetClusterNodeID resets all changes to the "cluster_node_id" field. +func (m *EnvBuildMutation) ResetClusterNodeID() { + m.cluster_node_id = nil + delete(m.clearedFields, envbuild.FieldClusterNodeID) +} + // ClearEnv clears the "env" edge to the Env entity. func (m *EnvBuildMutation) ClearEnv() { m.clearedenv = true @@ -2991,7 +4020,7 @@ func (m *EnvBuildMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *EnvBuildMutation) Fields() []string { - fields := make([]string, 0, 14) + fields := make([]string, 0, 16) if m.created_at != nil { fields = append(fields, envbuild.FieldCreatedAt) } @@ -3013,6 +4042,9 @@ func (m *EnvBuildMutation) Fields() []string { if m.start_cmd != nil { fields = append(fields, envbuild.FieldStartCmd) } + if m.ready_cmd != nil { + fields = append(fields, envbuild.FieldReadyCmd) + } if m.vcpu != nil { fields = append(fields, envbuild.FieldVcpu) } @@ -3034,6 +4066,9 @@ func (m *EnvBuildMutation) Fields() []string { if m.envd_version != nil { fields = append(fields, envbuild.FieldEnvdVersion) } + if m.cluster_node_id != nil { + fields = append(fields, envbuild.FieldClusterNodeID) + } return fields } @@ -3056,6 +4091,8 @@ func (m *EnvBuildMutation) Field(name string) (ent.Value, bool) { return m.Dockerfile() case envbuild.FieldStartCmd: return m.StartCmd() + case envbuild.FieldReadyCmd: + return m.ReadyCmd() case envbuild.FieldVcpu: return m.Vcpu() case envbuild.FieldRAMMB: @@ -3070,6 +4107,8 @@ func (m *EnvBuildMutation) Field(name string) (ent.Value, bool) { return m.FirecrackerVersion() case envbuild.FieldEnvdVersion: return m.EnvdVersion() + case envbuild.FieldClusterNodeID: + return m.ClusterNodeID() } return nil, false } @@ -3093,6 +4132,8 @@ func (m *EnvBuildMutation) OldField(ctx context.Context, name string) (ent.Value return m.OldDockerfile(ctx) case envbuild.FieldStartCmd: return m.OldStartCmd(ctx) + case envbuild.FieldReadyCmd: + return m.OldReadyCmd(ctx) case envbuild.FieldVcpu: return m.OldVcpu(ctx) case envbuild.FieldRAMMB: @@ -3107,6 +4148,8 @@ func (m *EnvBuildMutation) OldField(ctx context.Context, name string) (ent.Value return m.OldFirecrackerVersion(ctx) case envbuild.FieldEnvdVersion: return m.OldEnvdVersion(ctx) + case envbuild.FieldClusterNodeID: + return m.OldClusterNodeID(ctx) } return nil, fmt.Errorf("unknown EnvBuild field %s", name) } @@ -3165,6 +4208,13 @@ func (m *EnvBuildMutation) SetField(name string, value 
ent.Value) error { } m.SetStartCmd(v) return nil + case envbuild.FieldReadyCmd: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetReadyCmd(v) + return nil case envbuild.FieldVcpu: v, ok := value.(int64) if !ok { @@ -3214,6 +4264,13 @@ func (m *EnvBuildMutation) SetField(name string, value ent.Value) error { } m.SetEnvdVersion(v) return nil + case envbuild.FieldClusterNodeID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClusterNodeID(v) + return nil } return fmt.Errorf("unknown EnvBuild field %s", name) } @@ -3307,12 +4364,18 @@ func (m *EnvBuildMutation) ClearedFields() []string { if m.FieldCleared(envbuild.FieldStartCmd) { fields = append(fields, envbuild.FieldStartCmd) } + if m.FieldCleared(envbuild.FieldReadyCmd) { + fields = append(fields, envbuild.FieldReadyCmd) + } if m.FieldCleared(envbuild.FieldTotalDiskSizeMB) { fields = append(fields, envbuild.FieldTotalDiskSizeMB) } if m.FieldCleared(envbuild.FieldEnvdVersion) { fields = append(fields, envbuild.FieldEnvdVersion) } + if m.FieldCleared(envbuild.FieldClusterNodeID) { + fields = append(fields, envbuild.FieldClusterNodeID) + } return fields } @@ -3339,12 +4402,18 @@ func (m *EnvBuildMutation) ClearField(name string) error { case envbuild.FieldStartCmd: m.ClearStartCmd() return nil + case envbuild.FieldReadyCmd: + m.ClearReadyCmd() + return nil case envbuild.FieldTotalDiskSizeMB: m.ClearTotalDiskSizeMB() return nil case envbuild.FieldEnvdVersion: m.ClearEnvdVersion() return nil + case envbuild.FieldClusterNodeID: + m.ClearClusterNodeID() + return nil } return fmt.Errorf("unknown EnvBuild nullable field %s", name) } @@ -3374,6 +4443,9 @@ func (m *EnvBuildMutation) ResetField(name string) error { case envbuild.FieldStartCmd: m.ResetStartCmd() return nil + case envbuild.FieldReadyCmd: + m.ResetReadyCmd() + return nil case envbuild.FieldVcpu: m.ResetVcpu() return nil @@ -3395,6 
+4467,9 @@ func (m *EnvBuildMutation) ResetField(name string) error { case envbuild.FieldEnvdVersion: m.ResetEnvdVersion() return nil + case envbuild.FieldClusterNodeID: + m.ResetClusterNodeID() + return nil } return fmt.Errorf("unknown EnvBuild field %s", name) } @@ -3476,19 +4551,21 @@ func (m *EnvBuildMutation) ResetEdge(name string) error { // SnapshotMutation represents an operation that mutates the Snapshot nodes in the graph. type SnapshotMutation struct { config - op Op - typ string - id *uuid.UUID - created_at *time.Time - base_env_id *string - sandbox_id *string - metadata *map[string]string - clearedFields map[string]struct{} - env *string - clearedenv bool - done bool - oldValue func(context.Context) (*Snapshot, error) - predicates []predicate.Snapshot + op Op + typ string + id *uuid.UUID + created_at *time.Time + base_env_id *string + sandbox_id *string + metadata *map[string]string + sandbox_started_at *time.Time + env_secure *bool + clearedFields map[string]struct{} + env *string + clearedenv bool + done bool + oldValue func(context.Context) (*Snapshot, error) + predicates []predicate.Snapshot } var _ ent.Mutation = (*SnapshotMutation)(nil) @@ -3775,6 +4852,78 @@ func (m *SnapshotMutation) ResetMetadata() { m.metadata = nil } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (m *SnapshotMutation) SetSandboxStartedAt(t time.Time) { + m.sandbox_started_at = &t +} + +// SandboxStartedAt returns the value of the "sandbox_started_at" field in the mutation. +func (m *SnapshotMutation) SandboxStartedAt() (r time.Time, exists bool) { + v := m.sandbox_started_at + if v == nil { + return + } + return *v, true +} + +// OldSandboxStartedAt returns the old "sandbox_started_at" field's value of the Snapshot entity. +// If the Snapshot object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *SnapshotMutation) OldSandboxStartedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSandboxStartedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSandboxStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSandboxStartedAt: %w", err) + } + return oldValue.SandboxStartedAt, nil +} + +// ResetSandboxStartedAt resets all changes to the "sandbox_started_at" field. +func (m *SnapshotMutation) ResetSandboxStartedAt() { + m.sandbox_started_at = nil +} + +// SetEnvSecure sets the "env_secure" field. +func (m *SnapshotMutation) SetEnvSecure(b bool) { + m.env_secure = &b +} + +// EnvSecure returns the value of the "env_secure" field in the mutation. +func (m *SnapshotMutation) EnvSecure() (r bool, exists bool) { + v := m.env_secure + if v == nil { + return + } + return *v, true +} + +// OldEnvSecure returns the old "env_secure" field's value of the Snapshot entity. +// If the Snapshot object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *SnapshotMutation) OldEnvSecure(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEnvSecure is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEnvSecure requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEnvSecure: %w", err) + } + return oldValue.EnvSecure, nil +} + +// ResetEnvSecure resets all changes to the "env_secure" field. +func (m *SnapshotMutation) ResetEnvSecure() { + m.env_secure = nil +} + // ClearEnv clears the "env" edge to the Env entity. 
func (m *SnapshotMutation) ClearEnv() { m.clearedenv = true @@ -3836,7 +4985,7 @@ func (m *SnapshotMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *SnapshotMutation) Fields() []string { - fields := make([]string, 0, 5) + fields := make([]string, 0, 7) if m.created_at != nil { fields = append(fields, snapshot.FieldCreatedAt) } @@ -3852,6 +5001,12 @@ func (m *SnapshotMutation) Fields() []string { if m.metadata != nil { fields = append(fields, snapshot.FieldMetadata) } + if m.sandbox_started_at != nil { + fields = append(fields, snapshot.FieldSandboxStartedAt) + } + if m.env_secure != nil { + fields = append(fields, snapshot.FieldEnvSecure) + } return fields } @@ -3870,6 +5025,10 @@ func (m *SnapshotMutation) Field(name string) (ent.Value, bool) { return m.SandboxID() case snapshot.FieldMetadata: return m.Metadata() + case snapshot.FieldSandboxStartedAt: + return m.SandboxStartedAt() + case snapshot.FieldEnvSecure: + return m.EnvSecure() } return nil, false } @@ -3889,6 +5048,10 @@ func (m *SnapshotMutation) OldField(ctx context.Context, name string) (ent.Value return m.OldSandboxID(ctx) case snapshot.FieldMetadata: return m.OldMetadata(ctx) + case snapshot.FieldSandboxStartedAt: + return m.OldSandboxStartedAt(ctx) + case snapshot.FieldEnvSecure: + return m.OldEnvSecure(ctx) } return nil, fmt.Errorf("unknown Snapshot field %s", name) } @@ -3933,6 +5096,20 @@ func (m *SnapshotMutation) SetField(name string, value ent.Value) error { } m.SetMetadata(v) return nil + case snapshot.FieldSandboxStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSandboxStartedAt(v) + return nil + case snapshot.FieldEnvSecure: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEnvSecure(v) + return nil } return fmt.Errorf("unknown Snapshot field %s", name) } @@ -3997,6 
+5174,12 @@ func (m *SnapshotMutation) ResetField(name string) error { case snapshot.FieldMetadata: m.ResetMetadata() return nil + case snapshot.FieldSandboxStartedAt: + m.ResetSandboxStartedAt() + return nil + case snapshot.FieldEnvSecure: + m.ResetEnvSecure() + return nil } return fmt.Errorf("unknown Snapshot field %s", name) } @@ -4087,6 +5270,7 @@ type TeamMutation struct { blocked_reason *string name *string email *string + cluster_id *uuid.UUID clearedFields map[string]struct{} users map[uuid.UUID]struct{} removedusers map[uuid.UUID]struct{} @@ -4502,6 +5686,55 @@ func (m *TeamMutation) ResetEmail() { m.email = nil } +// SetClusterID sets the "cluster_id" field. +func (m *TeamMutation) SetClusterID(u uuid.UUID) { + m.cluster_id = &u +} + +// ClusterID returns the value of the "cluster_id" field in the mutation. +func (m *TeamMutation) ClusterID() (r uuid.UUID, exists bool) { + v := m.cluster_id + if v == nil { + return + } + return *v, true +} + +// OldClusterID returns the old "cluster_id" field's value of the Team entity. +// If the Team object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *TeamMutation) OldClusterID(ctx context.Context) (v *uuid.UUID, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClusterID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClusterID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClusterID: %w", err) + } + return oldValue.ClusterID, nil +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (m *TeamMutation) ClearClusterID() { + m.cluster_id = nil + m.clearedFields[team.FieldClusterID] = struct{}{} +} + +// ClusterIDCleared returns if the "cluster_id" field was cleared in this mutation. 
+func (m *TeamMutation) ClusterIDCleared() bool { + _, ok := m.clearedFields[team.FieldClusterID] + return ok +} + +// ResetClusterID resets all changes to the "cluster_id" field. +func (m *TeamMutation) ResetClusterID() { + m.cluster_id = nil + delete(m.clearedFields, team.FieldClusterID) +} + // AddUserIDs adds the "users" edge to the User entity by ids. func (m *TeamMutation) AddUserIDs(ids ...uuid.UUID) { if m.users == nil { @@ -4792,7 +6025,7 @@ func (m *TeamMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *TeamMutation) Fields() []string { - fields := make([]string, 0, 7) + fields := make([]string, 0, 8) if m.created_at != nil { fields = append(fields, team.FieldCreatedAt) } @@ -4814,6 +6047,9 @@ func (m *TeamMutation) Fields() []string { if m.email != nil { fields = append(fields, team.FieldEmail) } + if m.cluster_id != nil { + fields = append(fields, team.FieldClusterID) + } return fields } @@ -4836,6 +6072,8 @@ func (m *TeamMutation) Field(name string) (ent.Value, bool) { return m.Tier() case team.FieldEmail: return m.Email() + case team.FieldClusterID: + return m.ClusterID() } return nil, false } @@ -4859,6 +6097,8 @@ func (m *TeamMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldTier(ctx) case team.FieldEmail: return m.OldEmail(ctx) + case team.FieldClusterID: + return m.OldClusterID(ctx) } return nil, fmt.Errorf("unknown Team field %s", name) } @@ -4917,6 +6157,13 @@ func (m *TeamMutation) SetField(name string, value ent.Value) error { } m.SetEmail(v) return nil + case team.FieldClusterID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClusterID(v) + return nil } return fmt.Errorf("unknown Team field %s", name) } @@ -4956,6 +6203,9 @@ func (m *TeamMutation) ClearedFields() []string { if m.FieldCleared(team.FieldBlockedReason) { fields = append(fields, team.FieldBlockedReason) 
} + if m.FieldCleared(team.FieldClusterID) { + fields = append(fields, team.FieldClusterID) + } return fields } @@ -4979,6 +6229,9 @@ func (m *TeamMutation) ClearField(name string) error { case team.FieldBlockedReason: m.ClearBlockedReason() return nil + case team.FieldClusterID: + m.ClearClusterID() + return nil } return fmt.Errorf("unknown Team nullable field %s", name) } @@ -5008,6 +6261,9 @@ func (m *TeamMutation) ResetField(name string) error { case team.FieldEmail: m.ResetEmail() return nil + case team.FieldClusterID: + m.ResetClusterID() + return nil } return fmt.Errorf("unknown Team field %s", name) } @@ -5195,22 +6451,28 @@ func (m *TeamMutation) ResetEdge(name string) error { // TeamAPIKeyMutation represents an operation that mutates the TeamAPIKey nodes in the graph. type TeamAPIKeyMutation struct { config - op Op - typ string - id *uuid.UUID - api_key *string - created_at *time.Time - updated_at *time.Time - name *string - last_used *time.Time - clearedFields map[string]struct{} - team *uuid.UUID - clearedteam bool - creator *uuid.UUID - clearedcreator bool - done bool - oldValue func(context.Context) (*TeamAPIKey, error) - predicates []predicate.TeamAPIKey + op Op + typ string + id *uuid.UUID + api_key *string + api_key_hash *string + api_key_prefix *string + api_key_length *int + addapi_key_length *int + api_key_mask_prefix *string + api_key_mask_suffix *string + created_at *time.Time + updated_at *time.Time + name *string + last_used *time.Time + clearedFields map[string]struct{} + team *uuid.UUID + clearedteam bool + creator *uuid.UUID + clearedcreator bool + done bool + oldValue func(context.Context) (*TeamAPIKey, error) + predicates []predicate.TeamAPIKey } var _ ent.Mutation = (*TeamAPIKeyMutation)(nil) @@ -5353,6 +6615,206 @@ func (m *TeamAPIKeyMutation) ResetAPIKey() { m.api_key = nil } +// SetAPIKeyHash sets the "api_key_hash" field. 
+func (m *TeamAPIKeyMutation) SetAPIKeyHash(s string) { + m.api_key_hash = &s +} + +// APIKeyHash returns the value of the "api_key_hash" field in the mutation. +func (m *TeamAPIKeyMutation) APIKeyHash() (r string, exists bool) { + v := m.api_key_hash + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyHash returns the old "api_key_hash" field's value of the TeamAPIKey entity. +// If the TeamAPIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *TeamAPIKeyMutation) OldAPIKeyHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyHash: %w", err) + } + return oldValue.APIKeyHash, nil +} + +// ResetAPIKeyHash resets all changes to the "api_key_hash" field. +func (m *TeamAPIKeyMutation) ResetAPIKeyHash() { + m.api_key_hash = nil +} + +// SetAPIKeyPrefix sets the "api_key_prefix" field. +func (m *TeamAPIKeyMutation) SetAPIKeyPrefix(s string) { + m.api_key_prefix = &s +} + +// APIKeyPrefix returns the value of the "api_key_prefix" field in the mutation. +func (m *TeamAPIKeyMutation) APIKeyPrefix() (r string, exists bool) { + v := m.api_key_prefix + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyPrefix returns the old "api_key_prefix" field's value of the TeamAPIKey entity. +// If the TeamAPIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *TeamAPIKeyMutation) OldAPIKeyPrefix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyPrefix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyPrefix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyPrefix: %w", err) + } + return oldValue.APIKeyPrefix, nil +} + +// ResetAPIKeyPrefix resets all changes to the "api_key_prefix" field. +func (m *TeamAPIKeyMutation) ResetAPIKeyPrefix() { + m.api_key_prefix = nil +} + +// SetAPIKeyLength sets the "api_key_length" field. +func (m *TeamAPIKeyMutation) SetAPIKeyLength(i int) { + m.api_key_length = &i + m.addapi_key_length = nil +} + +// APIKeyLength returns the value of the "api_key_length" field in the mutation. +func (m *TeamAPIKeyMutation) APIKeyLength() (r int, exists bool) { + v := m.api_key_length + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyLength returns the old "api_key_length" field's value of the TeamAPIKey entity. +// If the TeamAPIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *TeamAPIKeyMutation) OldAPIKeyLength(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyLength is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyLength requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyLength: %w", err) + } + return oldValue.APIKeyLength, nil +} + +// AddAPIKeyLength adds i to the "api_key_length" field. 
+func (m *TeamAPIKeyMutation) AddAPIKeyLength(i int) { + if m.addapi_key_length != nil { + *m.addapi_key_length += i + } else { + m.addapi_key_length = &i + } +} + +// AddedAPIKeyLength returns the value that was added to the "api_key_length" field in this mutation. +func (m *TeamAPIKeyMutation) AddedAPIKeyLength() (r int, exists bool) { + v := m.addapi_key_length + if v == nil { + return + } + return *v, true +} + +// ResetAPIKeyLength resets all changes to the "api_key_length" field. +func (m *TeamAPIKeyMutation) ResetAPIKeyLength() { + m.api_key_length = nil + m.addapi_key_length = nil +} + +// SetAPIKeyMaskPrefix sets the "api_key_mask_prefix" field. +func (m *TeamAPIKeyMutation) SetAPIKeyMaskPrefix(s string) { + m.api_key_mask_prefix = &s +} + +// APIKeyMaskPrefix returns the value of the "api_key_mask_prefix" field in the mutation. +func (m *TeamAPIKeyMutation) APIKeyMaskPrefix() (r string, exists bool) { + v := m.api_key_mask_prefix + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyMaskPrefix returns the old "api_key_mask_prefix" field's value of the TeamAPIKey entity. +// If the TeamAPIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *TeamAPIKeyMutation) OldAPIKeyMaskPrefix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyMaskPrefix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyMaskPrefix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyMaskPrefix: %w", err) + } + return oldValue.APIKeyMaskPrefix, nil +} + +// ResetAPIKeyMaskPrefix resets all changes to the "api_key_mask_prefix" field. 
+func (m *TeamAPIKeyMutation) ResetAPIKeyMaskPrefix() { + m.api_key_mask_prefix = nil +} + +// SetAPIKeyMaskSuffix sets the "api_key_mask_suffix" field. +func (m *TeamAPIKeyMutation) SetAPIKeyMaskSuffix(s string) { + m.api_key_mask_suffix = &s +} + +// APIKeyMaskSuffix returns the value of the "api_key_mask_suffix" field in the mutation. +func (m *TeamAPIKeyMutation) APIKeyMaskSuffix() (r string, exists bool) { + v := m.api_key_mask_suffix + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyMaskSuffix returns the old "api_key_mask_suffix" field's value of the TeamAPIKey entity. +// If the TeamAPIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *TeamAPIKeyMutation) OldAPIKeyMaskSuffix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyMaskSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyMaskSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyMaskSuffix: %w", err) + } + return oldValue.APIKeyMaskSuffix, nil +} + +// ResetAPIKeyMaskSuffix resets all changes to the "api_key_mask_suffix" field. +func (m *TeamAPIKeyMutation) ResetAPIKeyMaskSuffix() { + m.api_key_mask_suffix = nil +} + // SetCreatedAt sets the "created_at" field. func (m *TeamAPIKeyMutation) SetCreatedAt(t time.Time) { m.created_at = &t @@ -5709,10 +7171,25 @@ func (m *TeamAPIKeyMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *TeamAPIKeyMutation) Fields() []string { - fields := make([]string, 0, 7) + fields := make([]string, 0, 12) if m.api_key != nil { fields = append(fields, teamapikey.FieldAPIKey) } + if m.api_key_hash != nil { + fields = append(fields, teamapikey.FieldAPIKeyHash) + } + if m.api_key_prefix != nil { + fields = append(fields, teamapikey.FieldAPIKeyPrefix) + } + if m.api_key_length != nil { + fields = append(fields, teamapikey.FieldAPIKeyLength) + } + if m.api_key_mask_prefix != nil { + fields = append(fields, teamapikey.FieldAPIKeyMaskPrefix) + } + if m.api_key_mask_suffix != nil { + fields = append(fields, teamapikey.FieldAPIKeyMaskSuffix) + } if m.created_at != nil { fields = append(fields, teamapikey.FieldCreatedAt) } @@ -5741,6 +7218,16 @@ func (m *TeamAPIKeyMutation) Field(name string) (ent.Value, bool) { switch name { case teamapikey.FieldAPIKey: return m.APIKey() + case teamapikey.FieldAPIKeyHash: + return m.APIKeyHash() + case teamapikey.FieldAPIKeyPrefix: + return m.APIKeyPrefix() + case teamapikey.FieldAPIKeyLength: + return m.APIKeyLength() + case teamapikey.FieldAPIKeyMaskPrefix: + return m.APIKeyMaskPrefix() + case teamapikey.FieldAPIKeyMaskSuffix: + return m.APIKeyMaskSuffix() case teamapikey.FieldCreatedAt: return m.CreatedAt() case teamapikey.FieldUpdatedAt: @@ -5764,6 +7251,16 @@ func (m *TeamAPIKeyMutation) OldField(ctx context.Context, name string) (ent.Val switch name { case teamapikey.FieldAPIKey: return m.OldAPIKey(ctx) + case teamapikey.FieldAPIKeyHash: + return m.OldAPIKeyHash(ctx) + case teamapikey.FieldAPIKeyPrefix: + return m.OldAPIKeyPrefix(ctx) + case teamapikey.FieldAPIKeyLength: + return m.OldAPIKeyLength(ctx) + case teamapikey.FieldAPIKeyMaskPrefix: + return m.OldAPIKeyMaskPrefix(ctx) + case teamapikey.FieldAPIKeyMaskSuffix: + return m.OldAPIKeyMaskSuffix(ctx) case teamapikey.FieldCreatedAt: return m.OldCreatedAt(ctx) case teamapikey.FieldUpdatedAt: @@ -5792,6 +7289,41 @@ func (m *TeamAPIKeyMutation) SetField(name string, value 
ent.Value) error { } m.SetAPIKey(v) return nil + case teamapikey.FieldAPIKeyHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyHash(v) + return nil + case teamapikey.FieldAPIKeyPrefix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyPrefix(v) + return nil + case teamapikey.FieldAPIKeyLength: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyLength(v) + return nil + case teamapikey.FieldAPIKeyMaskPrefix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyMaskPrefix(v) + return nil + case teamapikey.FieldAPIKeyMaskSuffix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyMaskSuffix(v) + return nil case teamapikey.FieldCreatedAt: v, ok := value.(time.Time) if !ok { @@ -5841,13 +7373,21 @@ func (m *TeamAPIKeyMutation) SetField(name string, value ent.Value) error { // AddedFields returns all numeric fields that were incremented/decremented during // this mutation. func (m *TeamAPIKeyMutation) AddedFields() []string { - return nil + var fields []string + if m.addapi_key_length != nil { + fields = append(fields, teamapikey.FieldAPIKeyLength) + } + return fields } // AddedField returns the numeric value that was incremented/decremented on a field // with the given name. The second boolean return value indicates that this field // was not set, or was not defined in the schema. func (m *TeamAPIKeyMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case teamapikey.FieldAPIKeyLength: + return m.AddedAPIKeyLength() + } return nil, false } @@ -5856,6 +7396,13 @@ func (m *TeamAPIKeyMutation) AddedField(name string) (ent.Value, bool) { // type. 
func (m *TeamAPIKeyMutation) AddField(name string, value ent.Value) error { switch name { + case teamapikey.FieldAPIKeyLength: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddAPIKeyLength(v) + return nil } return fmt.Errorf("unknown TeamAPIKey numeric field %s", name) } @@ -5907,6 +7454,21 @@ func (m *TeamAPIKeyMutation) ResetField(name string) error { case teamapikey.FieldAPIKey: m.ResetAPIKey() return nil + case teamapikey.FieldAPIKeyHash: + m.ResetAPIKeyHash() + return nil + case teamapikey.FieldAPIKeyPrefix: + m.ResetAPIKeyPrefix() + return nil + case teamapikey.FieldAPIKeyLength: + m.ResetAPIKeyLength() + return nil + case teamapikey.FieldAPIKeyMaskPrefix: + m.ResetAPIKeyMaskPrefix() + return nil + case teamapikey.FieldAPIKeyMaskSuffix: + m.ResetAPIKeyMaskSuffix() + return nil case teamapikey.FieldCreatedAt: m.ResetCreatedAt() return nil @@ -6724,8 +8286,8 @@ type UserMutation struct { created_envs map[string]struct{} removedcreated_envs map[string]struct{} clearedcreated_envs bool - access_tokens map[string]struct{} - removedaccess_tokens map[string]struct{} + access_tokens map[uuid.UUID]struct{} + removedaccess_tokens map[uuid.UUID]struct{} clearedaccess_tokens bool created_api_keys map[uuid.UUID]struct{} removedcreated_api_keys map[uuid.UUID]struct{} @@ -6987,9 +8549,9 @@ func (m *UserMutation) ResetCreatedEnvs() { } // AddAccessTokenIDs adds the "access_tokens" edge to the AccessToken entity by ids. -func (m *UserMutation) AddAccessTokenIDs(ids ...string) { +func (m *UserMutation) AddAccessTokenIDs(ids ...uuid.UUID) { if m.access_tokens == nil { - m.access_tokens = make(map[string]struct{}) + m.access_tokens = make(map[uuid.UUID]struct{}) } for i := range ids { m.access_tokens[ids[i]] = struct{}{} @@ -7007,9 +8569,9 @@ func (m *UserMutation) AccessTokensCleared() bool { } // RemoveAccessTokenIDs removes the "access_tokens" edge to the AccessToken entity by IDs. 
-func (m *UserMutation) RemoveAccessTokenIDs(ids ...string) { +func (m *UserMutation) RemoveAccessTokenIDs(ids ...uuid.UUID) { if m.removedaccess_tokens == nil { - m.removedaccess_tokens = make(map[string]struct{}) + m.removedaccess_tokens = make(map[uuid.UUID]struct{}) } for i := range ids { delete(m.access_tokens, ids[i]) @@ -7018,7 +8580,7 @@ func (m *UserMutation) RemoveAccessTokenIDs(ids ...string) { } // RemovedAccessTokens returns the removed IDs of the "access_tokens" edge to the AccessToken entity. -func (m *UserMutation) RemovedAccessTokensIDs() (ids []string) { +func (m *UserMutation) RemovedAccessTokensIDs() (ids []uuid.UUID) { for id := range m.removedaccess_tokens { ids = append(ids, id) } @@ -7026,7 +8588,7 @@ func (m *UserMutation) RemovedAccessTokensIDs() (ids []string) { } // AccessTokensIDs returns the "access_tokens" edge IDs in the mutation. -func (m *UserMutation) AccessTokensIDs() (ids []string) { +func (m *UserMutation) AccessTokensIDs() (ids []uuid.UUID) { for id := range m.access_tokens { ids = append(ids, id) } diff --git a/packages/shared/pkg/models/predicate/predicate.go b/packages/shared/pkg/models/predicate/predicate.go index bc01b27..3fd7041 100644 --- a/packages/shared/pkg/models/predicate/predicate.go +++ b/packages/shared/pkg/models/predicate/predicate.go @@ -9,6 +9,9 @@ import ( // AccessToken is the predicate function for accesstoken builders. type AccessToken func(*sql.Selector) +// Cluster is the predicate function for cluster builders. +type Cluster func(*sql.Selector) + // Env is the predicate function for env builders. 
type Env func(*sql.Selector) diff --git a/packages/shared/pkg/models/runtime.go b/packages/shared/pkg/models/runtime.go index 0e23b6d..8de8ec6 100644 --- a/packages/shared/pkg/models/runtime.go +++ b/packages/shared/pkg/models/runtime.go @@ -5,6 +5,8 @@ package models import ( "time" + "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" + "github.com/e2b-dev/infra/packages/shared/pkg/models/cluster" "github.com/e2b-dev/infra/packages/shared/pkg/models/env" "github.com/e2b-dev/infra/packages/shared/pkg/models/envalias" "github.com/e2b-dev/infra/packages/shared/pkg/models/envbuild" @@ -20,6 +22,26 @@ import ( // (default values, validators, hooks and policies) and stitches it // to their package variables. func init() { + accesstokenFields := schema.AccessToken{}.Fields() + _ = accesstokenFields + // accesstokenDescName is the schema descriptor for name field. + accesstokenDescName := accesstokenFields[7].Descriptor() + // accesstoken.DefaultName holds the default value on creation for the name field. + accesstoken.DefaultName = accesstokenDescName.Default.(string) + clusterFields := schema.Cluster{}.Fields() + _ = clusterFields + // clusterDescEndpoint is the schema descriptor for endpoint field. + clusterDescEndpoint := clusterFields[1].Descriptor() + // cluster.EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save. + cluster.EndpointValidator = clusterDescEndpoint.Validators[0].(func(string) error) + // clusterDescEndpointTLS is the schema descriptor for endpoint_tls field. + clusterDescEndpointTLS := clusterFields[2].Descriptor() + // cluster.DefaultEndpointTLS holds the default value on creation for the endpoint_tls field. + cluster.DefaultEndpointTLS = clusterDescEndpointTLS.Default.(bool) + // clusterDescToken is the schema descriptor for token field. + clusterDescToken := clusterFields[3].Descriptor() + // cluster.TokenValidator is a validator for the "token" field. 
It is called by the builders before save. + cluster.TokenValidator = clusterDescToken.Validators[0].(func(string) error) envFields := schema.Env{}.Fields() _ = envFields // envDescCreatedAt is the schema descriptor for created_at field. @@ -55,11 +77,11 @@ func init() { // envbuild.DefaultUpdatedAt holds the default value on creation for the updated_at field. envbuild.DefaultUpdatedAt = envbuildDescUpdatedAt.Default.(func() time.Time) // envbuildDescKernelVersion is the schema descriptor for kernel_version field. - envbuildDescKernelVersion := envbuildFields[12].Descriptor() + envbuildDescKernelVersion := envbuildFields[13].Descriptor() // envbuild.DefaultKernelVersion holds the default value on creation for the kernel_version field. envbuild.DefaultKernelVersion = envbuildDescKernelVersion.Default.(string) // envbuildDescFirecrackerVersion is the schema descriptor for firecracker_version field. - envbuildDescFirecrackerVersion := envbuildFields[13].Descriptor() + envbuildDescFirecrackerVersion := envbuildFields[14].Descriptor() // envbuild.DefaultFirecrackerVersion holds the default value on creation for the firecracker_version field. envbuild.DefaultFirecrackerVersion = envbuildDescFirecrackerVersion.Default.(string) snapshotFields := schema.Snapshot{}.Fields() @@ -68,6 +90,10 @@ func init() { snapshotDescCreatedAt := snapshotFields[1].Descriptor() // snapshot.DefaultCreatedAt holds the default value on creation for the created_at field. snapshot.DefaultCreatedAt = snapshotDescCreatedAt.Default.(func() time.Time) + // snapshotDescEnvSecure is the schema descriptor for env_secure field. + snapshotDescEnvSecure := snapshotFields[7].Descriptor() + // snapshot.DefaultEnvSecure holds the default value on creation for the env_secure field. + snapshot.DefaultEnvSecure = snapshotDescEnvSecure.Default.(bool) teamFields := schema.Team{}.Fields() _ = teamFields // teamDescCreatedAt is the schema descriptor for created_at field. 
@@ -81,11 +107,11 @@ func init() { teamapikeyFields := schema.TeamAPIKey{}.Fields() _ = teamapikeyFields // teamapikeyDescCreatedAt is the schema descriptor for created_at field. - teamapikeyDescCreatedAt := teamapikeyFields[2].Descriptor() + teamapikeyDescCreatedAt := teamapikeyFields[7].Descriptor() // teamapikey.DefaultCreatedAt holds the default value on creation for the created_at field. teamapikey.DefaultCreatedAt = teamapikeyDescCreatedAt.Default.(func() time.Time) // teamapikeyDescName is the schema descriptor for name field. - teamapikeyDescName := teamapikeyFields[5].Descriptor() + teamapikeyDescName := teamapikeyFields[10].Descriptor() // teamapikey.DefaultName holds the default value on creation for the name field. teamapikey.DefaultName = teamapikeyDescName.Default.(string) userFields := schema.User{}.Fields() diff --git a/packages/shared/pkg/models/snapshot.go b/packages/shared/pkg/models/snapshot.go index dbb7052..676e825 100644 --- a/packages/shared/pkg/models/snapshot.go +++ b/packages/shared/pkg/models/snapshot.go @@ -30,6 +30,10 @@ type Snapshot struct { SandboxID string `json:"sandbox_id,omitempty"` // Metadata holds the value of the "metadata" field. Metadata map[string]string `json:"metadata,omitempty"` + // SandboxStartedAt holds the value of the "sandbox_started_at" field. + SandboxStartedAt time.Time `json:"sandbox_started_at,omitempty"` + // EnvSecure holds the value of the "env_secure" field. + EnvSecure bool `json:"env_secure,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the SnapshotQuery when eager-loading is set. 
Edges SnapshotEdges `json:"edges"` @@ -65,9 +69,11 @@ func (*Snapshot) scanValues(columns []string) ([]any, error) { switch columns[i] { case snapshot.FieldMetadata: values[i] = new([]byte) + case snapshot.FieldEnvSecure: + values[i] = new(sql.NullBool) case snapshot.FieldBaseEnvID, snapshot.FieldEnvID, snapshot.FieldSandboxID: values[i] = new(sql.NullString) - case snapshot.FieldCreatedAt: + case snapshot.FieldCreatedAt, snapshot.FieldSandboxStartedAt: values[i] = new(sql.NullTime) case snapshot.FieldID: values[i] = new(uuid.UUID) @@ -124,6 +130,18 @@ func (s *Snapshot) assignValues(columns []string, values []any) error { return fmt.Errorf("unmarshal field metadata: %w", err) } } + case snapshot.FieldSandboxStartedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field sandbox_started_at", values[i]) + } else if value.Valid { + s.SandboxStartedAt = value.Time + } + case snapshot.FieldEnvSecure: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field env_secure", values[i]) + } else if value.Valid { + s.EnvSecure = value.Bool + } default: s.selectValues.Set(columns[i], values[i]) } @@ -179,6 +197,12 @@ func (s *Snapshot) String() string { builder.WriteString(", ") builder.WriteString("metadata=") builder.WriteString(fmt.Sprintf("%v", s.Metadata)) + builder.WriteString(", ") + builder.WriteString("sandbox_started_at=") + builder.WriteString(s.SandboxStartedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("env_secure=") + builder.WriteString(fmt.Sprintf("%v", s.EnvSecure)) builder.WriteByte(')') return builder.String() } diff --git a/packages/shared/pkg/models/snapshot/snapshot.go b/packages/shared/pkg/models/snapshot/snapshot.go index 61145a2..2f439a1 100644 --- a/packages/shared/pkg/models/snapshot/snapshot.go +++ b/packages/shared/pkg/models/snapshot/snapshot.go @@ -24,6 +24,10 @@ const ( FieldSandboxID = "sandbox_id" // FieldMetadata holds the 
string denoting the metadata field in the database. FieldMetadata = "metadata" + // FieldSandboxStartedAt holds the string denoting the sandbox_started_at field in the database. + FieldSandboxStartedAt = "sandbox_started_at" + // FieldEnvSecure holds the string denoting the env_secure field in the database. + FieldEnvSecure = "env_secure" // EdgeEnv holds the string denoting the env edge name in mutations. EdgeEnv = "env" // Table holds the table name of the snapshot in the database. @@ -45,6 +49,8 @@ var Columns = []string{ FieldEnvID, FieldSandboxID, FieldMetadata, + FieldSandboxStartedAt, + FieldEnvSecure, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -60,6 +66,8 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time + // DefaultEnvSecure holds the default value on creation for the "env_secure" field. + DefaultEnvSecure bool ) // OrderOption defines the ordering options for the Snapshot queries. @@ -90,6 +98,16 @@ func BySandboxID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldSandboxID, opts...).ToFunc() } +// BySandboxStartedAt orders the results by the sandbox_started_at field. +func BySandboxStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSandboxStartedAt, opts...).ToFunc() +} + +// ByEnvSecure orders the results by the env_secure field. +func ByEnvSecure(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnvSecure, opts...).ToFunc() +} + // ByEnvField orders the results by env field. 
func ByEnvField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/snapshot/where.go b/packages/shared/pkg/models/snapshot/where.go index 0910ec3..d2fc5cf 100644 --- a/packages/shared/pkg/models/snapshot/where.go +++ b/packages/shared/pkg/models/snapshot/where.go @@ -77,6 +77,16 @@ func SandboxID(v string) predicate.Snapshot { return predicate.Snapshot(sql.FieldEQ(FieldSandboxID, v)) } +// SandboxStartedAt applies equality check predicate on the "sandbox_started_at" field. It's identical to SandboxStartedAtEQ. +func SandboxStartedAt(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldEQ(FieldSandboxStartedAt, v)) +} + +// EnvSecure applies equality check predicate on the "env_secure" field. It's identical to EnvSecureEQ. +func EnvSecure(v bool) predicate.Snapshot { + return predicate.Snapshot(sql.FieldEQ(FieldEnvSecure, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Snapshot { return predicate.Snapshot(sql.FieldEQ(FieldCreatedAt, v)) @@ -312,6 +322,56 @@ func SandboxIDContainsFold(v string) predicate.Snapshot { return predicate.Snapshot(sql.FieldContainsFold(FieldSandboxID, v)) } +// SandboxStartedAtEQ applies the EQ predicate on the "sandbox_started_at" field. +func SandboxStartedAtEQ(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldEQ(FieldSandboxStartedAt, v)) +} + +// SandboxStartedAtNEQ applies the NEQ predicate on the "sandbox_started_at" field. +func SandboxStartedAtNEQ(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldNEQ(FieldSandboxStartedAt, v)) +} + +// SandboxStartedAtIn applies the In predicate on the "sandbox_started_at" field. 
+func SandboxStartedAtIn(vs ...time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldIn(FieldSandboxStartedAt, vs...)) +} + +// SandboxStartedAtNotIn applies the NotIn predicate on the "sandbox_started_at" field. +func SandboxStartedAtNotIn(vs ...time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldNotIn(FieldSandboxStartedAt, vs...)) +} + +// SandboxStartedAtGT applies the GT predicate on the "sandbox_started_at" field. +func SandboxStartedAtGT(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldGT(FieldSandboxStartedAt, v)) +} + +// SandboxStartedAtGTE applies the GTE predicate on the "sandbox_started_at" field. +func SandboxStartedAtGTE(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldGTE(FieldSandboxStartedAt, v)) +} + +// SandboxStartedAtLT applies the LT predicate on the "sandbox_started_at" field. +func SandboxStartedAtLT(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldLT(FieldSandboxStartedAt, v)) +} + +// SandboxStartedAtLTE applies the LTE predicate on the "sandbox_started_at" field. +func SandboxStartedAtLTE(v time.Time) predicate.Snapshot { + return predicate.Snapshot(sql.FieldLTE(FieldSandboxStartedAt, v)) +} + +// EnvSecureEQ applies the EQ predicate on the "env_secure" field. +func EnvSecureEQ(v bool) predicate.Snapshot { + return predicate.Snapshot(sql.FieldEQ(FieldEnvSecure, v)) +} + +// EnvSecureNEQ applies the NEQ predicate on the "env_secure" field. +func EnvSecureNEQ(v bool) predicate.Snapshot { + return predicate.Snapshot(sql.FieldNEQ(FieldEnvSecure, v)) +} + // HasEnv applies the HasEdge predicate on the "env" edge. 
func HasEnv() predicate.Snapshot { return predicate.Snapshot(func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/snapshot_create.go b/packages/shared/pkg/models/snapshot_create.go index 396ade5..01f0245 100644 --- a/packages/shared/pkg/models/snapshot_create.go +++ b/packages/shared/pkg/models/snapshot_create.go @@ -63,6 +63,26 @@ func (sc *SnapshotCreate) SetMetadata(m map[string]string) *SnapshotCreate { return sc } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (sc *SnapshotCreate) SetSandboxStartedAt(t time.Time) *SnapshotCreate { + sc.mutation.SetSandboxStartedAt(t) + return sc +} + +// SetEnvSecure sets the "env_secure" field. +func (sc *SnapshotCreate) SetEnvSecure(b bool) *SnapshotCreate { + sc.mutation.SetEnvSecure(b) + return sc +} + +// SetNillableEnvSecure sets the "env_secure" field if the given value is not nil. +func (sc *SnapshotCreate) SetNillableEnvSecure(b *bool) *SnapshotCreate { + if b != nil { + sc.SetEnvSecure(*b) + } + return sc +} + // SetID sets the "id" field. func (sc *SnapshotCreate) SetID(u uuid.UUID) *SnapshotCreate { sc.mutation.SetID(u) @@ -113,6 +133,10 @@ func (sc *SnapshotCreate) defaults() { v := snapshot.DefaultCreatedAt() sc.mutation.SetCreatedAt(v) } + if _, ok := sc.mutation.EnvSecure(); !ok { + v := snapshot.DefaultEnvSecure + sc.mutation.SetEnvSecure(v) + } } // check runs all checks and user-defined validators on the builder. 
@@ -132,6 +156,12 @@ func (sc *SnapshotCreate) check() error { if _, ok := sc.mutation.Metadata(); !ok { return &ValidationError{Name: "metadata", err: errors.New(`models: missing required field "Snapshot.metadata"`)} } + if _, ok := sc.mutation.SandboxStartedAt(); !ok { + return &ValidationError{Name: "sandbox_started_at", err: errors.New(`models: missing required field "Snapshot.sandbox_started_at"`)} + } + if _, ok := sc.mutation.EnvSecure(); !ok { + return &ValidationError{Name: "env_secure", err: errors.New(`models: missing required field "Snapshot.env_secure"`)} + } if _, ok := sc.mutation.EnvID(); !ok { return &ValidationError{Name: "env", err: errors.New(`models: missing required edge "Snapshot.env"`)} } @@ -188,6 +218,14 @@ func (sc *SnapshotCreate) createSpec() (*Snapshot, *sqlgraph.CreateSpec) { _spec.SetField(snapshot.FieldMetadata, field.TypeJSON, value) _node.Metadata = value } + if value, ok := sc.mutation.SandboxStartedAt(); ok { + _spec.SetField(snapshot.FieldSandboxStartedAt, field.TypeTime, value) + _node.SandboxStartedAt = value + } + if value, ok := sc.mutation.EnvSecure(); ok { + _spec.SetField(snapshot.FieldEnvSecure, field.TypeBool, value) + _node.EnvSecure = value + } if nodes := sc.mutation.EnvIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -306,6 +344,30 @@ func (u *SnapshotUpsert) UpdateMetadata() *SnapshotUpsert { return u } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (u *SnapshotUpsert) SetSandboxStartedAt(v time.Time) *SnapshotUpsert { + u.Set(snapshot.FieldSandboxStartedAt, v) + return u +} + +// UpdateSandboxStartedAt sets the "sandbox_started_at" field to the value that was provided on create. +func (u *SnapshotUpsert) UpdateSandboxStartedAt() *SnapshotUpsert { + u.SetExcluded(snapshot.FieldSandboxStartedAt) + return u +} + +// SetEnvSecure sets the "env_secure" field. 
+func (u *SnapshotUpsert) SetEnvSecure(v bool) *SnapshotUpsert { + u.Set(snapshot.FieldEnvSecure, v) + return u +} + +// UpdateEnvSecure sets the "env_secure" field to the value that was provided on create. +func (u *SnapshotUpsert) UpdateEnvSecure() *SnapshotUpsert { + u.SetExcluded(snapshot.FieldEnvSecure) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. // Using this option is equivalent to using: // @@ -413,6 +475,34 @@ func (u *SnapshotUpsertOne) UpdateMetadata() *SnapshotUpsertOne { }) } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (u *SnapshotUpsertOne) SetSandboxStartedAt(v time.Time) *SnapshotUpsertOne { + return u.Update(func(s *SnapshotUpsert) { + s.SetSandboxStartedAt(v) + }) +} + +// UpdateSandboxStartedAt sets the "sandbox_started_at" field to the value that was provided on create. +func (u *SnapshotUpsertOne) UpdateSandboxStartedAt() *SnapshotUpsertOne { + return u.Update(func(s *SnapshotUpsert) { + s.UpdateSandboxStartedAt() + }) +} + +// SetEnvSecure sets the "env_secure" field. +func (u *SnapshotUpsertOne) SetEnvSecure(v bool) *SnapshotUpsertOne { + return u.Update(func(s *SnapshotUpsert) { + s.SetEnvSecure(v) + }) +} + +// UpdateEnvSecure sets the "env_secure" field to the value that was provided on create. +func (u *SnapshotUpsertOne) UpdateEnvSecure() *SnapshotUpsertOne { + return u.Update(func(s *SnapshotUpsert) { + s.UpdateEnvSecure() + }) +} + // Exec executes the query. func (u *SnapshotUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -687,6 +777,34 @@ func (u *SnapshotUpsertBulk) UpdateMetadata() *SnapshotUpsertBulk { }) } +// SetSandboxStartedAt sets the "sandbox_started_at" field. 
+func (u *SnapshotUpsertBulk) SetSandboxStartedAt(v time.Time) *SnapshotUpsertBulk { + return u.Update(func(s *SnapshotUpsert) { + s.SetSandboxStartedAt(v) + }) +} + +// UpdateSandboxStartedAt sets the "sandbox_started_at" field to the value that was provided on create. +func (u *SnapshotUpsertBulk) UpdateSandboxStartedAt() *SnapshotUpsertBulk { + return u.Update(func(s *SnapshotUpsert) { + s.UpdateSandboxStartedAt() + }) +} + +// SetEnvSecure sets the "env_secure" field. +func (u *SnapshotUpsertBulk) SetEnvSecure(v bool) *SnapshotUpsertBulk { + return u.Update(func(s *SnapshotUpsert) { + s.SetEnvSecure(v) + }) +} + +// UpdateEnvSecure sets the "env_secure" field to the value that was provided on create. +func (u *SnapshotUpsertBulk) UpdateEnvSecure() *SnapshotUpsertBulk { + return u.Update(func(s *SnapshotUpsert) { + s.UpdateEnvSecure() + }) +} + // Exec executes the query. func (u *SnapshotUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/packages/shared/pkg/models/snapshot_update.go b/packages/shared/pkg/models/snapshot_update.go index 10fb2be..67e4e6d 100644 --- a/packages/shared/pkg/models/snapshot_update.go +++ b/packages/shared/pkg/models/snapshot_update.go @@ -6,6 +6,7 @@ import ( "context" "errors" "fmt" + "time" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -78,6 +79,34 @@ func (su *SnapshotUpdate) SetMetadata(m map[string]string) *SnapshotUpdate { return su } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (su *SnapshotUpdate) SetSandboxStartedAt(t time.Time) *SnapshotUpdate { + su.mutation.SetSandboxStartedAt(t) + return su +} + +// SetNillableSandboxStartedAt sets the "sandbox_started_at" field if the given value is not nil. +func (su *SnapshotUpdate) SetNillableSandboxStartedAt(t *time.Time) *SnapshotUpdate { + if t != nil { + su.SetSandboxStartedAt(*t) + } + return su +} + +// SetEnvSecure sets the "env_secure" field. 
+func (su *SnapshotUpdate) SetEnvSecure(b bool) *SnapshotUpdate { + su.mutation.SetEnvSecure(b) + return su +} + +// SetNillableEnvSecure sets the "env_secure" field if the given value is not nil. +func (su *SnapshotUpdate) SetNillableEnvSecure(b *bool) *SnapshotUpdate { + if b != nil { + su.SetEnvSecure(*b) + } + return su +} + // SetEnv sets the "env" edge to the Env entity. func (su *SnapshotUpdate) SetEnv(e *Env) *SnapshotUpdate { return su.SetEnvID(e.ID) @@ -156,6 +185,12 @@ func (su *SnapshotUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := su.mutation.Metadata(); ok { _spec.SetField(snapshot.FieldMetadata, field.TypeJSON, value) } + if value, ok := su.mutation.SandboxStartedAt(); ok { + _spec.SetField(snapshot.FieldSandboxStartedAt, field.TypeTime, value) + } + if value, ok := su.mutation.EnvSecure(); ok { + _spec.SetField(snapshot.FieldEnvSecure, field.TypeBool, value) + } if su.mutation.EnvCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -259,6 +294,34 @@ func (suo *SnapshotUpdateOne) SetMetadata(m map[string]string) *SnapshotUpdateOn return suo } +// SetSandboxStartedAt sets the "sandbox_started_at" field. +func (suo *SnapshotUpdateOne) SetSandboxStartedAt(t time.Time) *SnapshotUpdateOne { + suo.mutation.SetSandboxStartedAt(t) + return suo +} + +// SetNillableSandboxStartedAt sets the "sandbox_started_at" field if the given value is not nil. +func (suo *SnapshotUpdateOne) SetNillableSandboxStartedAt(t *time.Time) *SnapshotUpdateOne { + if t != nil { + suo.SetSandboxStartedAt(*t) + } + return suo +} + +// SetEnvSecure sets the "env_secure" field. +func (suo *SnapshotUpdateOne) SetEnvSecure(b bool) *SnapshotUpdateOne { + suo.mutation.SetEnvSecure(b) + return suo +} + +// SetNillableEnvSecure sets the "env_secure" field if the given value is not nil. 
+func (suo *SnapshotUpdateOne) SetNillableEnvSecure(b *bool) *SnapshotUpdateOne { + if b != nil { + suo.SetEnvSecure(*b) + } + return suo +} + // SetEnv sets the "env" edge to the Env entity. func (suo *SnapshotUpdateOne) SetEnv(e *Env) *SnapshotUpdateOne { return suo.SetEnvID(e.ID) @@ -367,6 +430,12 @@ func (suo *SnapshotUpdateOne) sqlSave(ctx context.Context) (_node *Snapshot, err if value, ok := suo.mutation.Metadata(); ok { _spec.SetField(snapshot.FieldMetadata, field.TypeJSON, value) } + if value, ok := suo.mutation.SandboxStartedAt(); ok { + _spec.SetField(snapshot.FieldSandboxStartedAt, field.TypeTime, value) + } + if value, ok := suo.mutation.EnvSecure(); ok { + _spec.SetField(snapshot.FieldEnvSecure, field.TypeBool, value) + } if suo.mutation.EnvCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/packages/shared/pkg/models/team.go b/packages/shared/pkg/models/team.go index c3007cf..80cef44 100644 --- a/packages/shared/pkg/models/team.go +++ b/packages/shared/pkg/models/team.go @@ -33,6 +33,8 @@ type Team struct { Tier string `json:"tier,omitempty"` // Email holds the value of the "email" field. Email string `json:"email,omitempty"` + // ClusterID holds the value of the "cluster_id" field. + ClusterID *uuid.UUID `json:"cluster_id,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the TeamQuery when eager-loading is set. 
Edges TeamEdges `json:"edges"` @@ -110,6 +112,8 @@ func (*Team) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case team.FieldClusterID: + values[i] = &sql.NullScanner{S: new(uuid.UUID)} case team.FieldIsBanned, team.FieldIsBlocked: values[i] = new(sql.NullBool) case team.FieldBlockedReason, team.FieldName, team.FieldTier, team.FieldEmail: @@ -182,6 +186,13 @@ func (t *Team) assignValues(columns []string, values []any) error { } else if value.Valid { t.Email = value.String } + case team.FieldClusterID: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field cluster_id", values[i]) + } else if value.Valid { + t.ClusterID = new(uuid.UUID) + *t.ClusterID = *value.S.(*uuid.UUID) + } default: t.selectValues.Set(columns[i], values[i]) } @@ -265,6 +276,11 @@ func (t *Team) String() string { builder.WriteString(", ") builder.WriteString("email=") builder.WriteString(t.Email) + builder.WriteString(", ") + if v := t.ClusterID; v != nil { + builder.WriteString("cluster_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } builder.WriteByte(')') return builder.String() } diff --git a/packages/shared/pkg/models/team/team.go b/packages/shared/pkg/models/team/team.go index 3721ca7..184be79 100644 --- a/packages/shared/pkg/models/team/team.go +++ b/packages/shared/pkg/models/team/team.go @@ -28,6 +28,8 @@ const ( FieldTier = "tier" // FieldEmail holds the string denoting the email field in the database. FieldEmail = "email" + // FieldClusterID holds the string denoting the cluster_id field in the database. + FieldClusterID = "cluster_id" // EdgeUsers holds the string denoting the users edge name in mutations. EdgeUsers = "users" // EdgeTeamAPIKeys holds the string denoting the team_api_keys edge name in mutations. 
@@ -85,6 +87,7 @@ var Columns = []string{ FieldName, FieldTier, FieldEmail, + FieldClusterID, } var ( @@ -153,6 +156,11 @@ func ByEmail(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldEmail, opts...).ToFunc() } +// ByClusterID orders the results by the cluster_id field. +func ByClusterID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClusterID, opts...).ToFunc() +} + // ByUsersCount orders the results by users count. func ByUsersCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/team/where.go b/packages/shared/pkg/models/team/where.go index a71cbc7..270ae2d 100644 --- a/packages/shared/pkg/models/team/where.go +++ b/packages/shared/pkg/models/team/where.go @@ -92,6 +92,11 @@ func Email(v string) predicate.Team { return predicate.Team(sql.FieldEQ(FieldEmail, v)) } +// ClusterID applies equality check predicate on the "cluster_id" field. It's identical to ClusterIDEQ. +func ClusterID(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldEQ(FieldClusterID, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Team { return predicate.Team(sql.FieldEQ(FieldCreatedAt, v)) @@ -442,6 +447,56 @@ func EmailContainsFold(v string) predicate.Team { return predicate.Team(sql.FieldContainsFold(FieldEmail, v)) } +// ClusterIDEQ applies the EQ predicate on the "cluster_id" field. +func ClusterIDEQ(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldEQ(FieldClusterID, v)) +} + +// ClusterIDNEQ applies the NEQ predicate on the "cluster_id" field. +func ClusterIDNEQ(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldNEQ(FieldClusterID, v)) +} + +// ClusterIDIn applies the In predicate on the "cluster_id" field. 
+func ClusterIDIn(vs ...uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldIn(FieldClusterID, vs...)) +} + +// ClusterIDNotIn applies the NotIn predicate on the "cluster_id" field. +func ClusterIDNotIn(vs ...uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldNotIn(FieldClusterID, vs...)) +} + +// ClusterIDGT applies the GT predicate on the "cluster_id" field. +func ClusterIDGT(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldGT(FieldClusterID, v)) +} + +// ClusterIDGTE applies the GTE predicate on the "cluster_id" field. +func ClusterIDGTE(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldGTE(FieldClusterID, v)) +} + +// ClusterIDLT applies the LT predicate on the "cluster_id" field. +func ClusterIDLT(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldLT(FieldClusterID, v)) +} + +// ClusterIDLTE applies the LTE predicate on the "cluster_id" field. +func ClusterIDLTE(v uuid.UUID) predicate.Team { + return predicate.Team(sql.FieldLTE(FieldClusterID, v)) +} + +// ClusterIDIsNil applies the IsNil predicate on the "cluster_id" field. +func ClusterIDIsNil() predicate.Team { + return predicate.Team(sql.FieldIsNull(FieldClusterID)) +} + +// ClusterIDNotNil applies the NotNil predicate on the "cluster_id" field. +func ClusterIDNotNil() predicate.Team { + return predicate.Team(sql.FieldNotNull(FieldClusterID)) +} + // HasUsers applies the HasEdge predicate on the "users" edge. func HasUsers() predicate.Team { return predicate.Team(func(s *sql.Selector) { diff --git a/packages/shared/pkg/models/team_create.go b/packages/shared/pkg/models/team_create.go index 68cbc68..9ebf738 100644 --- a/packages/shared/pkg/models/team_create.go +++ b/packages/shared/pkg/models/team_create.go @@ -103,6 +103,20 @@ func (tc *TeamCreate) SetEmail(s string) *TeamCreate { return tc } +// SetClusterID sets the "cluster_id" field. 
+func (tc *TeamCreate) SetClusterID(u uuid.UUID) *TeamCreate { + tc.mutation.SetClusterID(u) + return tc +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. +func (tc *TeamCreate) SetNillableClusterID(u *uuid.UUID) *TeamCreate { + if u != nil { + tc.SetClusterID(*u) + } + return tc +} + // SetID sets the "id" field. func (tc *TeamCreate) SetID(u uuid.UUID) *TeamCreate { tc.mutation.SetID(u) @@ -304,6 +318,10 @@ func (tc *TeamCreate) createSpec() (*Team, *sqlgraph.CreateSpec) { _spec.SetField(team.FieldEmail, field.TypeString, value) _node.Email = value } + if value, ok := tc.mutation.ClusterID(); ok { + _spec.SetField(team.FieldClusterID, field.TypeUUID, value) + _node.ClusterID = &value + } if nodes := tc.mutation.UsersIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -536,6 +554,24 @@ func (u *TeamUpsert) UpdateEmail() *TeamUpsert { return u } +// SetClusterID sets the "cluster_id" field. +func (u *TeamUpsert) SetClusterID(v uuid.UUID) *TeamUpsert { + u.Set(team.FieldClusterID, v) + return u +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. +func (u *TeamUpsert) UpdateClusterID() *TeamUpsert { + u.SetExcluded(team.FieldClusterID) + return u +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *TeamUpsert) ClearClusterID() *TeamUpsert { + u.SetNull(team.FieldClusterID) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. // Using this option is equivalent to using: // @@ -692,6 +728,27 @@ func (u *TeamUpsertOne) UpdateEmail() *TeamUpsertOne { }) } +// SetClusterID sets the "cluster_id" field. +func (u *TeamUpsertOne) SetClusterID(v uuid.UUID) *TeamUpsertOne { + return u.Update(func(s *TeamUpsert) { + s.SetClusterID(v) + }) +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. 
+func (u *TeamUpsertOne) UpdateClusterID() *TeamUpsertOne { + return u.Update(func(s *TeamUpsert) { + s.UpdateClusterID() + }) +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *TeamUpsertOne) ClearClusterID() *TeamUpsertOne { + return u.Update(func(s *TeamUpsert) { + s.ClearClusterID() + }) +} + // Exec executes the query. func (u *TeamUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -1015,6 +1072,27 @@ func (u *TeamUpsertBulk) UpdateEmail() *TeamUpsertBulk { }) } +// SetClusterID sets the "cluster_id" field. +func (u *TeamUpsertBulk) SetClusterID(v uuid.UUID) *TeamUpsertBulk { + return u.Update(func(s *TeamUpsert) { + s.SetClusterID(v) + }) +} + +// UpdateClusterID sets the "cluster_id" field to the value that was provided on create. +func (u *TeamUpsertBulk) UpdateClusterID() *TeamUpsertBulk { + return u.Update(func(s *TeamUpsert) { + s.UpdateClusterID() + }) +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (u *TeamUpsertBulk) ClearClusterID() *TeamUpsertBulk { + return u.Update(func(s *TeamUpsert) { + s.ClearClusterID() + }) +} + // Exec executes the query. func (u *TeamUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/packages/shared/pkg/models/team_update.go b/packages/shared/pkg/models/team_update.go index 4ffdc46..ddc1b8c 100644 --- a/packages/shared/pkg/models/team_update.go +++ b/packages/shared/pkg/models/team_update.go @@ -137,6 +137,26 @@ func (tu *TeamUpdate) SetNillableEmail(s *string) *TeamUpdate { return tu } +// SetClusterID sets the "cluster_id" field. +func (tu *TeamUpdate) SetClusterID(u uuid.UUID) *TeamUpdate { + tu.mutation.SetClusterID(u) + return tu +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. 
+func (tu *TeamUpdate) SetNillableClusterID(u *uuid.UUID) *TeamUpdate { + if u != nil { + tu.SetClusterID(*u) + } + return tu +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (tu *TeamUpdate) ClearClusterID() *TeamUpdate { + tu.mutation.ClearClusterID() + return tu +} + // AddUserIDs adds the "users" edge to the User entity by IDs. func (tu *TeamUpdate) AddUserIDs(ids ...uuid.UUID) *TeamUpdate { tu.mutation.AddUserIDs(ids...) @@ -385,6 +405,12 @@ func (tu *TeamUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := tu.mutation.Email(); ok { _spec.SetField(team.FieldEmail, field.TypeString, value) } + if value, ok := tu.mutation.ClusterID(); ok { + _spec.SetField(team.FieldClusterID, field.TypeUUID, value) + } + if tu.mutation.ClusterIDCleared() { + _spec.ClearField(team.FieldClusterID, field.TypeUUID) + } if tu.mutation.UsersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -746,6 +772,26 @@ func (tuo *TeamUpdateOne) SetNillableEmail(s *string) *TeamUpdateOne { return tuo } +// SetClusterID sets the "cluster_id" field. +func (tuo *TeamUpdateOne) SetClusterID(u uuid.UUID) *TeamUpdateOne { + tuo.mutation.SetClusterID(u) + return tuo +} + +// SetNillableClusterID sets the "cluster_id" field if the given value is not nil. +func (tuo *TeamUpdateOne) SetNillableClusterID(u *uuid.UUID) *TeamUpdateOne { + if u != nil { + tuo.SetClusterID(*u) + } + return tuo +} + +// ClearClusterID clears the value of the "cluster_id" field. +func (tuo *TeamUpdateOne) ClearClusterID() *TeamUpdateOne { + tuo.mutation.ClearClusterID() + return tuo +} + // AddUserIDs adds the "users" edge to the User entity by IDs. func (tuo *TeamUpdateOne) AddUserIDs(ids ...uuid.UUID) *TeamUpdateOne { tuo.mutation.AddUserIDs(ids...) 
@@ -1024,6 +1070,12 @@ func (tuo *TeamUpdateOne) sqlSave(ctx context.Context) (_node *Team, err error) if value, ok := tuo.mutation.Email(); ok { _spec.SetField(team.FieldEmail, field.TypeString, value) } + if value, ok := tuo.mutation.ClusterID(); ok { + _spec.SetField(team.FieldClusterID, field.TypeUUID, value) + } + if tuo.mutation.ClusterIDCleared() { + _spec.ClearField(team.FieldClusterID, field.TypeUUID) + } if tuo.mutation.UsersCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, diff --git a/packages/shared/pkg/models/teamapikey.go b/packages/shared/pkg/models/teamapikey.go index 6539436..cb307d0 100644 --- a/packages/shared/pkg/models/teamapikey.go +++ b/packages/shared/pkg/models/teamapikey.go @@ -22,6 +22,16 @@ type TeamAPIKey struct { ID uuid.UUID `json:"id,omitempty"` // APIKey holds the value of the "api_key" field. APIKey string `json:"-"` + // APIKeyHash holds the value of the "api_key_hash" field. + APIKeyHash string `json:"-"` + // APIKeyPrefix holds the value of the "api_key_prefix" field. + APIKeyPrefix string `json:"api_key_prefix,omitempty"` + // APIKeyLength holds the value of the "api_key_length" field. + APIKeyLength int `json:"api_key_length,omitempty"` + // APIKeyMaskPrefix holds the value of the "api_key_mask_prefix" field. + APIKeyMaskPrefix string `json:"api_key_mask_prefix,omitempty"` + // APIKeyMaskSuffix holds the value of the "api_key_mask_suffix" field. + APIKeyMaskSuffix string `json:"api_key_mask_suffix,omitempty"` // CreatedAt holds the value of the "created_at" field. CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. 
@@ -84,7 +94,9 @@ func (*TeamAPIKey) scanValues(columns []string) ([]any, error) { switch columns[i] { case teamapikey.FieldCreatedBy: values[i] = &sql.NullScanner{S: new(uuid.UUID)} - case teamapikey.FieldAPIKey, teamapikey.FieldName: + case teamapikey.FieldAPIKeyLength: + values[i] = new(sql.NullInt64) + case teamapikey.FieldAPIKey, teamapikey.FieldAPIKeyHash, teamapikey.FieldAPIKeyPrefix, teamapikey.FieldAPIKeyMaskPrefix, teamapikey.FieldAPIKeyMaskSuffix, teamapikey.FieldName: values[i] = new(sql.NullString) case teamapikey.FieldCreatedAt, teamapikey.FieldUpdatedAt, teamapikey.FieldLastUsed: values[i] = new(sql.NullTime) @@ -117,6 +129,36 @@ func (tak *TeamAPIKey) assignValues(columns []string, values []any) error { } else if value.Valid { tak.APIKey = value.String } + case teamapikey.FieldAPIKeyHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key_hash", values[i]) + } else if value.Valid { + tak.APIKeyHash = value.String + } + case teamapikey.FieldAPIKeyPrefix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key_prefix", values[i]) + } else if value.Valid { + tak.APIKeyPrefix = value.String + } + case teamapikey.FieldAPIKeyLength: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field api_key_length", values[i]) + } else if value.Valid { + tak.APIKeyLength = int(value.Int64) + } + case teamapikey.FieldAPIKeyMaskPrefix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key_mask_prefix", values[i]) + } else if value.Valid { + tak.APIKeyMaskPrefix = value.String + } + case teamapikey.FieldAPIKeyMaskSuffix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key_mask_suffix", values[i]) + } else if value.Valid { + tak.APIKeyMaskSuffix = value.String + } case teamapikey.FieldCreatedAt: if 
value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) @@ -204,6 +246,20 @@ func (tak *TeamAPIKey) String() string { builder.WriteString(fmt.Sprintf("id=%v, ", tak.ID)) builder.WriteString("api_key=") builder.WriteString(", ") + builder.WriteString("api_key_hash=") + builder.WriteString(", ") + builder.WriteString("api_key_prefix=") + builder.WriteString(tak.APIKeyPrefix) + builder.WriteString(", ") + builder.WriteString("api_key_length=") + builder.WriteString(fmt.Sprintf("%v", tak.APIKeyLength)) + builder.WriteString(", ") + builder.WriteString("api_key_mask_prefix=") + builder.WriteString(tak.APIKeyMaskPrefix) + builder.WriteString(", ") + builder.WriteString("api_key_mask_suffix=") + builder.WriteString(tak.APIKeyMaskSuffix) + builder.WriteString(", ") builder.WriteString("created_at=") builder.WriteString(tak.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") diff --git a/packages/shared/pkg/models/teamapikey/teamapikey.go b/packages/shared/pkg/models/teamapikey/teamapikey.go index ec803ed..25c8758 100644 --- a/packages/shared/pkg/models/teamapikey/teamapikey.go +++ b/packages/shared/pkg/models/teamapikey/teamapikey.go @@ -16,6 +16,16 @@ const ( FieldID = "id" // FieldAPIKey holds the string denoting the api_key field in the database. FieldAPIKey = "api_key" + // FieldAPIKeyHash holds the string denoting the api_key_hash field in the database. + FieldAPIKeyHash = "api_key_hash" + // FieldAPIKeyPrefix holds the string denoting the api_key_prefix field in the database. + FieldAPIKeyPrefix = "api_key_prefix" + // FieldAPIKeyLength holds the string denoting the api_key_length field in the database. + FieldAPIKeyLength = "api_key_length" + // FieldAPIKeyMaskPrefix holds the string denoting the api_key_mask_prefix field in the database. + FieldAPIKeyMaskPrefix = "api_key_mask_prefix" + // FieldAPIKeyMaskSuffix holds the string denoting the api_key_mask_suffix field in the database. 
+ FieldAPIKeyMaskSuffix = "api_key_mask_suffix" // FieldCreatedAt holds the string denoting the created_at field in the database. FieldCreatedAt = "created_at" // FieldUpdatedAt holds the string denoting the updated_at field in the database. @@ -54,6 +64,11 @@ const ( var Columns = []string{ FieldID, FieldAPIKey, + FieldAPIKeyHash, + FieldAPIKeyPrefix, + FieldAPIKeyLength, + FieldAPIKeyMaskPrefix, + FieldAPIKeyMaskSuffix, FieldCreatedAt, FieldUpdatedAt, FieldTeamID, @@ -92,6 +107,31 @@ func ByAPIKey(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAPIKey, opts...).ToFunc() } +// ByAPIKeyHash orders the results by the api_key_hash field. +func ByAPIKeyHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyHash, opts...).ToFunc() +} + +// ByAPIKeyPrefix orders the results by the api_key_prefix field. +func ByAPIKeyPrefix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyPrefix, opts...).ToFunc() +} + +// ByAPIKeyLength orders the results by the api_key_length field. +func ByAPIKeyLength(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyLength, opts...).ToFunc() +} + +// ByAPIKeyMaskPrefix orders the results by the api_key_mask_prefix field. +func ByAPIKeyMaskPrefix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyMaskPrefix, opts...).ToFunc() +} + +// ByAPIKeyMaskSuffix orders the results by the api_key_mask_suffix field. +func ByAPIKeyMaskSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyMaskSuffix, opts...).ToFunc() +} + // ByCreatedAt orders the results by the created_at field. 
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() diff --git a/packages/shared/pkg/models/teamapikey/where.go b/packages/shared/pkg/models/teamapikey/where.go index 0ebd7ac..7c90086 100644 --- a/packages/shared/pkg/models/teamapikey/where.go +++ b/packages/shared/pkg/models/teamapikey/where.go @@ -62,6 +62,31 @@ func APIKey(v string) predicate.TeamAPIKey { return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKey, v)) } +// APIKeyHash applies equality check predicate on the "api_key_hash" field. It's identical to APIKeyHashEQ. +func APIKeyHash(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyHash, v)) +} + +// APIKeyPrefix applies equality check predicate on the "api_key_prefix" field. It's identical to APIKeyPrefixEQ. +func APIKeyPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyPrefix, v)) +} + +// APIKeyLength applies equality check predicate on the "api_key_length" field. It's identical to APIKeyLengthEQ. +func APIKeyLength(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyLength, v)) +} + +// APIKeyMaskPrefix applies equality check predicate on the "api_key_mask_prefix" field. It's identical to APIKeyMaskPrefixEQ. +func APIKeyMaskPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskSuffix applies equality check predicate on the "api_key_mask_suffix" field. It's identical to APIKeyMaskSuffixEQ. +func APIKeyMaskSuffix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyMaskSuffix, v)) +} + // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
func CreatedAt(v time.Time) predicate.TeamAPIKey { return predicate.TeamAPIKey(sql.FieldEQ(FieldCreatedAt, v)) @@ -157,6 +182,306 @@ func APIKeyContainsFold(v string) predicate.TeamAPIKey { return predicate.TeamAPIKey(sql.FieldContainsFold(FieldAPIKey, v)) } +// APIKeyHashEQ applies the EQ predicate on the "api_key_hash" field. +func APIKeyHashEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyHash, v)) +} + +// APIKeyHashNEQ applies the NEQ predicate on the "api_key_hash" field. +func APIKeyHashNEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNEQ(FieldAPIKeyHash, v)) +} + +// APIKeyHashIn applies the In predicate on the "api_key_hash" field. +func APIKeyHashIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldIn(FieldAPIKeyHash, vs...)) +} + +// APIKeyHashNotIn applies the NotIn predicate on the "api_key_hash" field. +func APIKeyHashNotIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNotIn(FieldAPIKeyHash, vs...)) +} + +// APIKeyHashGT applies the GT predicate on the "api_key_hash" field. +func APIKeyHashGT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGT(FieldAPIKeyHash, v)) +} + +// APIKeyHashGTE applies the GTE predicate on the "api_key_hash" field. +func APIKeyHashGTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGTE(FieldAPIKeyHash, v)) +} + +// APIKeyHashLT applies the LT predicate on the "api_key_hash" field. +func APIKeyHashLT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLT(FieldAPIKeyHash, v)) +} + +// APIKeyHashLTE applies the LTE predicate on the "api_key_hash" field. +func APIKeyHashLTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLTE(FieldAPIKeyHash, v)) +} + +// APIKeyHashContains applies the Contains predicate on the "api_key_hash" field. 
+func APIKeyHashContains(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContains(FieldAPIKeyHash, v)) +} + +// APIKeyHashHasPrefix applies the HasPrefix predicate on the "api_key_hash" field. +func APIKeyHashHasPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasPrefix(FieldAPIKeyHash, v)) +} + +// APIKeyHashHasSuffix applies the HasSuffix predicate on the "api_key_hash" field. +func APIKeyHashHasSuffix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasSuffix(FieldAPIKeyHash, v)) +} + +// APIKeyHashEqualFold applies the EqualFold predicate on the "api_key_hash" field. +func APIKeyHashEqualFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEqualFold(FieldAPIKeyHash, v)) +} + +// APIKeyHashContainsFold applies the ContainsFold predicate on the "api_key_hash" field. +func APIKeyHashContainsFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContainsFold(FieldAPIKeyHash, v)) +} + +// APIKeyPrefixEQ applies the EQ predicate on the "api_key_prefix" field. +func APIKeyPrefixEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixNEQ applies the NEQ predicate on the "api_key_prefix" field. +func APIKeyPrefixNEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNEQ(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixIn applies the In predicate on the "api_key_prefix" field. +func APIKeyPrefixIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldIn(FieldAPIKeyPrefix, vs...)) +} + +// APIKeyPrefixNotIn applies the NotIn predicate on the "api_key_prefix" field. +func APIKeyPrefixNotIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNotIn(FieldAPIKeyPrefix, vs...)) +} + +// APIKeyPrefixGT applies the GT predicate on the "api_key_prefix" field. 
+func APIKeyPrefixGT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGT(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixGTE applies the GTE predicate on the "api_key_prefix" field. +func APIKeyPrefixGTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGTE(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixLT applies the LT predicate on the "api_key_prefix" field. +func APIKeyPrefixLT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLT(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixLTE applies the LTE predicate on the "api_key_prefix" field. +func APIKeyPrefixLTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLTE(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixContains applies the Contains predicate on the "api_key_prefix" field. +func APIKeyPrefixContains(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContains(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixHasPrefix applies the HasPrefix predicate on the "api_key_prefix" field. +func APIKeyPrefixHasPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasPrefix(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixHasSuffix applies the HasSuffix predicate on the "api_key_prefix" field. +func APIKeyPrefixHasSuffix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasSuffix(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixEqualFold applies the EqualFold predicate on the "api_key_prefix" field. +func APIKeyPrefixEqualFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEqualFold(FieldAPIKeyPrefix, v)) +} + +// APIKeyPrefixContainsFold applies the ContainsFold predicate on the "api_key_prefix" field. +func APIKeyPrefixContainsFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContainsFold(FieldAPIKeyPrefix, v)) +} + +// APIKeyLengthEQ applies the EQ predicate on the "api_key_length" field. 
+func APIKeyLengthEQ(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyLength, v)) +} + +// APIKeyLengthNEQ applies the NEQ predicate on the "api_key_length" field. +func APIKeyLengthNEQ(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNEQ(FieldAPIKeyLength, v)) +} + +// APIKeyLengthIn applies the In predicate on the "api_key_length" field. +func APIKeyLengthIn(vs ...int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldIn(FieldAPIKeyLength, vs...)) +} + +// APIKeyLengthNotIn applies the NotIn predicate on the "api_key_length" field. +func APIKeyLengthNotIn(vs ...int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNotIn(FieldAPIKeyLength, vs...)) +} + +// APIKeyLengthGT applies the GT predicate on the "api_key_length" field. +func APIKeyLengthGT(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGT(FieldAPIKeyLength, v)) +} + +// APIKeyLengthGTE applies the GTE predicate on the "api_key_length" field. +func APIKeyLengthGTE(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGTE(FieldAPIKeyLength, v)) +} + +// APIKeyLengthLT applies the LT predicate on the "api_key_length" field. +func APIKeyLengthLT(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLT(FieldAPIKeyLength, v)) +} + +// APIKeyLengthLTE applies the LTE predicate on the "api_key_length" field. +func APIKeyLengthLTE(v int) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLTE(FieldAPIKeyLength, v)) +} + +// APIKeyMaskPrefixEQ applies the EQ predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixNEQ applies the NEQ predicate on the "api_key_mask_prefix" field. 
+func APIKeyMaskPrefixNEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNEQ(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixIn applies the In predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldIn(FieldAPIKeyMaskPrefix, vs...)) +} + +// APIKeyMaskPrefixNotIn applies the NotIn predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixNotIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNotIn(FieldAPIKeyMaskPrefix, vs...)) +} + +// APIKeyMaskPrefixGT applies the GT predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixGT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGT(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixGTE applies the GTE predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixGTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGTE(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixLT applies the LT predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixLT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLT(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixLTE applies the LTE predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixLTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLTE(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixContains applies the Contains predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixContains(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContains(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixHasPrefix applies the HasPrefix predicate on the "api_key_mask_prefix" field. 
+func APIKeyMaskPrefixHasPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasPrefix(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixHasSuffix applies the HasSuffix predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixHasSuffix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasSuffix(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixEqualFold applies the EqualFold predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixEqualFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEqualFold(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskPrefixContainsFold applies the ContainsFold predicate on the "api_key_mask_prefix" field. +func APIKeyMaskPrefixContainsFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContainsFold(FieldAPIKeyMaskPrefix, v)) +} + +// APIKeyMaskSuffixEQ applies the EQ predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEQ(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixNEQ applies the NEQ predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixNEQ(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNEQ(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixIn applies the In predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldIn(FieldAPIKeyMaskSuffix, vs...)) +} + +// APIKeyMaskSuffixNotIn applies the NotIn predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixNotIn(vs ...string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldNotIn(FieldAPIKeyMaskSuffix, vs...)) +} + +// APIKeyMaskSuffixGT applies the GT predicate on the "api_key_mask_suffix" field. 
+func APIKeyMaskSuffixGT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGT(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixGTE applies the GTE predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixGTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldGTE(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixLT applies the LT predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixLT(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLT(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixLTE applies the LTE predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixLTE(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldLTE(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixContains applies the Contains predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixContains(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContains(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixHasPrefix applies the HasPrefix predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixHasPrefix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasPrefix(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixHasSuffix applies the HasSuffix predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixHasSuffix(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldHasSuffix(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixEqualFold applies the EqualFold predicate on the "api_key_mask_suffix" field. +func APIKeyMaskSuffixEqualFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldEqualFold(FieldAPIKeyMaskSuffix, v)) +} + +// APIKeyMaskSuffixContainsFold applies the ContainsFold predicate on the "api_key_mask_suffix" field. 
+func APIKeyMaskSuffixContainsFold(v string) predicate.TeamAPIKey { + return predicate.TeamAPIKey(sql.FieldContainsFold(FieldAPIKeyMaskSuffix, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.TeamAPIKey { return predicate.TeamAPIKey(sql.FieldEQ(FieldCreatedAt, v)) diff --git a/packages/shared/pkg/models/teamapikey_create.go b/packages/shared/pkg/models/teamapikey_create.go index 8815edb..fe590c2 100644 --- a/packages/shared/pkg/models/teamapikey_create.go +++ b/packages/shared/pkg/models/teamapikey_create.go @@ -32,6 +32,36 @@ func (takc *TeamAPIKeyCreate) SetAPIKey(s string) *TeamAPIKeyCreate { return takc } +// SetAPIKeyHash sets the "api_key_hash" field. +func (takc *TeamAPIKeyCreate) SetAPIKeyHash(s string) *TeamAPIKeyCreate { + takc.mutation.SetAPIKeyHash(s) + return takc +} + +// SetAPIKeyPrefix sets the "api_key_prefix" field. +func (takc *TeamAPIKeyCreate) SetAPIKeyPrefix(s string) *TeamAPIKeyCreate { + takc.mutation.SetAPIKeyPrefix(s) + return takc +} + +// SetAPIKeyLength sets the "api_key_length" field. +func (takc *TeamAPIKeyCreate) SetAPIKeyLength(i int) *TeamAPIKeyCreate { + takc.mutation.SetAPIKeyLength(i) + return takc +} + +// SetAPIKeyMaskPrefix sets the "api_key_mask_prefix" field. +func (takc *TeamAPIKeyCreate) SetAPIKeyMaskPrefix(s string) *TeamAPIKeyCreate { + takc.mutation.SetAPIKeyMaskPrefix(s) + return takc +} + +// SetAPIKeyMaskSuffix sets the "api_key_mask_suffix" field. +func (takc *TeamAPIKeyCreate) SetAPIKeyMaskSuffix(s string) *TeamAPIKeyCreate { + takc.mutation.SetAPIKeyMaskSuffix(s) + return takc +} + // SetCreatedAt sets the "created_at" field. 
func (takc *TeamAPIKeyCreate) SetCreatedAt(t time.Time) *TeamAPIKeyCreate { takc.mutation.SetCreatedAt(t) @@ -188,6 +218,21 @@ func (takc *TeamAPIKeyCreate) check() error { if _, ok := takc.mutation.APIKey(); !ok { return &ValidationError{Name: "api_key", err: errors.New(`models: missing required field "TeamAPIKey.api_key"`)} } + if _, ok := takc.mutation.APIKeyHash(); !ok { + return &ValidationError{Name: "api_key_hash", err: errors.New(`models: missing required field "TeamAPIKey.api_key_hash"`)} + } + if _, ok := takc.mutation.APIKeyPrefix(); !ok { + return &ValidationError{Name: "api_key_prefix", err: errors.New(`models: missing required field "TeamAPIKey.api_key_prefix"`)} + } + if _, ok := takc.mutation.APIKeyLength(); !ok { + return &ValidationError{Name: "api_key_length", err: errors.New(`models: missing required field "TeamAPIKey.api_key_length"`)} + } + if _, ok := takc.mutation.APIKeyMaskPrefix(); !ok { + return &ValidationError{Name: "api_key_mask_prefix", err: errors.New(`models: missing required field "TeamAPIKey.api_key_mask_prefix"`)} + } + if _, ok := takc.mutation.APIKeyMaskSuffix(); !ok { + return &ValidationError{Name: "api_key_mask_suffix", err: errors.New(`models: missing required field "TeamAPIKey.api_key_mask_suffix"`)} + } if _, ok := takc.mutation.CreatedAt(); !ok { return &ValidationError{Name: "created_at", err: errors.New(`models: missing required field "TeamAPIKey.created_at"`)} } @@ -241,6 +286,26 @@ func (takc *TeamAPIKeyCreate) createSpec() (*TeamAPIKey, *sqlgraph.CreateSpec) { _spec.SetField(teamapikey.FieldAPIKey, field.TypeString, value) _node.APIKey = value } + if value, ok := takc.mutation.APIKeyHash(); ok { + _spec.SetField(teamapikey.FieldAPIKeyHash, field.TypeString, value) + _node.APIKeyHash = value + } + if value, ok := takc.mutation.APIKeyPrefix(); ok { + _spec.SetField(teamapikey.FieldAPIKeyPrefix, field.TypeString, value) + _node.APIKeyPrefix = value + } + if value, ok := takc.mutation.APIKeyLength(); ok { + 
_spec.SetField(teamapikey.FieldAPIKeyLength, field.TypeInt, value) + _node.APIKeyLength = value + } + if value, ok := takc.mutation.APIKeyMaskPrefix(); ok { + _spec.SetField(teamapikey.FieldAPIKeyMaskPrefix, field.TypeString, value) + _node.APIKeyMaskPrefix = value + } + if value, ok := takc.mutation.APIKeyMaskSuffix(); ok { + _spec.SetField(teamapikey.FieldAPIKeyMaskSuffix, field.TypeString, value) + _node.APIKeyMaskSuffix = value + } if value, ok := takc.mutation.CreatedAt(); ok { _spec.SetField(teamapikey.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = value @@ -357,6 +422,18 @@ func (u *TeamAPIKeyUpsert) UpdateAPIKey() *TeamAPIKeyUpsert { return u } +// SetAPIKeyHash sets the "api_key_hash" field. +func (u *TeamAPIKeyUpsert) SetAPIKeyHash(v string) *TeamAPIKeyUpsert { + u.Set(teamapikey.FieldAPIKeyHash, v) + return u +} + +// UpdateAPIKeyHash sets the "api_key_hash" field to the value that was provided on create. +func (u *TeamAPIKeyUpsert) UpdateAPIKeyHash() *TeamAPIKeyUpsert { + u.SetExcluded(teamapikey.FieldAPIKeyHash) + return u +} + // SetUpdatedAt sets the "updated_at" field. 
func (u *TeamAPIKeyUpsert) SetUpdatedAt(v time.Time) *TeamAPIKeyUpsert { u.Set(teamapikey.FieldUpdatedAt, v) @@ -452,6 +529,18 @@ func (u *TeamAPIKeyUpsertOne) UpdateNewValues() *TeamAPIKeyUpsertOne { if _, exists := u.create.mutation.ID(); exists { s.SetIgnore(teamapikey.FieldID) } + if _, exists := u.create.mutation.APIKeyPrefix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyPrefix) + } + if _, exists := u.create.mutation.APIKeyLength(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyLength) + } + if _, exists := u.create.mutation.APIKeyMaskPrefix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyMaskPrefix) + } + if _, exists := u.create.mutation.APIKeyMaskSuffix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyMaskSuffix) + } if _, exists := u.create.mutation.CreatedAt(); exists { s.SetIgnore(teamapikey.FieldCreatedAt) } @@ -500,6 +589,20 @@ func (u *TeamAPIKeyUpsertOne) UpdateAPIKey() *TeamAPIKeyUpsertOne { }) } +// SetAPIKeyHash sets the "api_key_hash" field. +func (u *TeamAPIKeyUpsertOne) SetAPIKeyHash(v string) *TeamAPIKeyUpsertOne { + return u.Update(func(s *TeamAPIKeyUpsert) { + s.SetAPIKeyHash(v) + }) +} + +// UpdateAPIKeyHash sets the "api_key_hash" field to the value that was provided on create. +func (u *TeamAPIKeyUpsertOne) UpdateAPIKeyHash() *TeamAPIKeyUpsertOne { + return u.Update(func(s *TeamAPIKeyUpsert) { + s.UpdateAPIKeyHash() + }) +} + // SetUpdatedAt sets the "updated_at" field. 
func (u *TeamAPIKeyUpsertOne) SetUpdatedAt(v time.Time) *TeamAPIKeyUpsertOne { return u.Update(func(s *TeamAPIKeyUpsert) { @@ -774,6 +877,18 @@ func (u *TeamAPIKeyUpsertBulk) UpdateNewValues() *TeamAPIKeyUpsertBulk { if _, exists := b.mutation.ID(); exists { s.SetIgnore(teamapikey.FieldID) } + if _, exists := b.mutation.APIKeyPrefix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyPrefix) + } + if _, exists := b.mutation.APIKeyLength(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyLength) + } + if _, exists := b.mutation.APIKeyMaskPrefix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyMaskPrefix) + } + if _, exists := b.mutation.APIKeyMaskSuffix(); exists { + s.SetIgnore(teamapikey.FieldAPIKeyMaskSuffix) + } if _, exists := b.mutation.CreatedAt(); exists { s.SetIgnore(teamapikey.FieldCreatedAt) } @@ -823,6 +938,20 @@ func (u *TeamAPIKeyUpsertBulk) UpdateAPIKey() *TeamAPIKeyUpsertBulk { }) } +// SetAPIKeyHash sets the "api_key_hash" field. +func (u *TeamAPIKeyUpsertBulk) SetAPIKeyHash(v string) *TeamAPIKeyUpsertBulk { + return u.Update(func(s *TeamAPIKeyUpsert) { + s.SetAPIKeyHash(v) + }) +} + +// UpdateAPIKeyHash sets the "api_key_hash" field to the value that was provided on create. +func (u *TeamAPIKeyUpsertBulk) UpdateAPIKeyHash() *TeamAPIKeyUpsertBulk { + return u.Update(func(s *TeamAPIKeyUpsert) { + s.UpdateAPIKeyHash() + }) +} + // SetUpdatedAt sets the "updated_at" field. func (u *TeamAPIKeyUpsertBulk) SetUpdatedAt(v time.Time) *TeamAPIKeyUpsertBulk { return u.Update(func(s *TeamAPIKeyUpsert) { diff --git a/packages/shared/pkg/models/teamapikey_update.go b/packages/shared/pkg/models/teamapikey_update.go index 1447783..d1a5307 100644 --- a/packages/shared/pkg/models/teamapikey_update.go +++ b/packages/shared/pkg/models/teamapikey_update.go @@ -47,6 +47,20 @@ func (taku *TeamAPIKeyUpdate) SetNillableAPIKey(s *string) *TeamAPIKeyUpdate { return taku } +// SetAPIKeyHash sets the "api_key_hash" field. 
+func (taku *TeamAPIKeyUpdate) SetAPIKeyHash(s string) *TeamAPIKeyUpdate { + taku.mutation.SetAPIKeyHash(s) + return taku +} + +// SetNillableAPIKeyHash sets the "api_key_hash" field if the given value is not nil. +func (taku *TeamAPIKeyUpdate) SetNillableAPIKeyHash(s *string) *TeamAPIKeyUpdate { + if s != nil { + taku.SetAPIKeyHash(*s) + } + return taku +} + // SetUpdatedAt sets the "updated_at" field. func (taku *TeamAPIKeyUpdate) SetUpdatedAt(t time.Time) *TeamAPIKeyUpdate { taku.mutation.SetUpdatedAt(t) @@ -232,6 +246,9 @@ func (taku *TeamAPIKeyUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := taku.mutation.APIKey(); ok { _spec.SetField(teamapikey.FieldAPIKey, field.TypeString, value) } + if value, ok := taku.mutation.APIKeyHash(); ok { + _spec.SetField(teamapikey.FieldAPIKeyHash, field.TypeString, value) + } if value, ok := taku.mutation.UpdatedAt(); ok { _spec.SetField(teamapikey.FieldUpdatedAt, field.TypeTime, value) } @@ -347,6 +364,20 @@ func (takuo *TeamAPIKeyUpdateOne) SetNillableAPIKey(s *string) *TeamAPIKeyUpdate return takuo } +// SetAPIKeyHash sets the "api_key_hash" field. +func (takuo *TeamAPIKeyUpdateOne) SetAPIKeyHash(s string) *TeamAPIKeyUpdateOne { + takuo.mutation.SetAPIKeyHash(s) + return takuo +} + +// SetNillableAPIKeyHash sets the "api_key_hash" field if the given value is not nil. +func (takuo *TeamAPIKeyUpdateOne) SetNillableAPIKeyHash(s *string) *TeamAPIKeyUpdateOne { + if s != nil { + takuo.SetAPIKeyHash(*s) + } + return takuo +} + // SetUpdatedAt sets the "updated_at" field. 
func (takuo *TeamAPIKeyUpdateOne) SetUpdatedAt(t time.Time) *TeamAPIKeyUpdateOne { takuo.mutation.SetUpdatedAt(t) @@ -562,6 +593,9 @@ func (takuo *TeamAPIKeyUpdateOne) sqlSave(ctx context.Context) (_node *TeamAPIKe if value, ok := takuo.mutation.APIKey(); ok { _spec.SetField(teamapikey.FieldAPIKey, field.TypeString, value) } + if value, ok := takuo.mutation.APIKeyHash(); ok { + _spec.SetField(teamapikey.FieldAPIKeyHash, field.TypeString, value) + } if value, ok := takuo.mutation.UpdatedAt(); ok { _spec.SetField(teamapikey.FieldUpdatedAt, field.TypeTime, value) } diff --git a/packages/shared/pkg/models/tx.go b/packages/shared/pkg/models/tx.go index 675cb5d..5b13077 100644 --- a/packages/shared/pkg/models/tx.go +++ b/packages/shared/pkg/models/tx.go @@ -14,6 +14,8 @@ type Tx struct { config // AccessToken is the client for interacting with the AccessToken builders. AccessToken *AccessTokenClient + // Cluster is the client for interacting with the Cluster builders. + Cluster *ClusterClient // Env is the client for interacting with the Env builders. Env *EnvClient // EnvAlias is the client for interacting with the EnvAlias builders. @@ -164,6 +166,7 @@ func (tx *Tx) Client() *Client { func (tx *Tx) init() { tx.AccessToken = NewAccessTokenClient(tx.config) + tx.Cluster = NewClusterClient(tx.config) tx.Env = NewEnvClient(tx.config) tx.EnvAlias = NewEnvAliasClient(tx.config) tx.EnvBuild = NewEnvBuildClient(tx.config) diff --git a/packages/shared/pkg/models/user/user.go b/packages/shared/pkg/models/user/user.go index 2d72b0c..8f08f06 100644 --- a/packages/shared/pkg/models/user/user.go +++ b/packages/shared/pkg/models/user/user.go @@ -24,8 +24,6 @@ const ( EdgeCreatedAPIKeys = "created_api_keys" // EdgeUsersTeams holds the string denoting the users_teams edge name in mutations. EdgeUsersTeams = "users_teams" - // AccessTokenFieldID holds the string denoting the ID field of the AccessToken. 
- AccessTokenFieldID = "access_token" // Table holds the table name of the user in the database. Table = "users" // TeamsTable is the table that holds the teams relation/edge. The primary key declared below. @@ -189,7 +187,7 @@ func newCreatedEnvsStep() *sqlgraph.Step { func newAccessTokensStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AccessTokensInverseTable, AccessTokenFieldID), + sqlgraph.To(AccessTokensInverseTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, AccessTokensTable, AccessTokensColumn), ) } diff --git a/packages/shared/pkg/models/user_create.go b/packages/shared/pkg/models/user_create.go index 327c994..de07cac 100644 --- a/packages/shared/pkg/models/user_create.go +++ b/packages/shared/pkg/models/user_create.go @@ -71,14 +71,14 @@ func (uc *UserCreate) AddCreatedEnvs(e ...*Env) *UserCreate { } // AddAccessTokenIDs adds the "access_tokens" edge to the AccessToken entity by IDs. -func (uc *UserCreate) AddAccessTokenIDs(ids ...string) *UserCreate { +func (uc *UserCreate) AddAccessTokenIDs(ids ...uuid.UUID) *UserCreate { uc.mutation.AddAccessTokenIDs(ids...) return uc } // AddAccessTokens adds the "access_tokens" edges to the AccessToken entity. 
func (uc *UserCreate) AddAccessTokens(a ...*AccessToken) *UserCreate { - ids := make([]string, len(a)) + ids := make([]uuid.UUID, len(a)) for i := range a { ids[i] = a[i].ID } @@ -244,7 +244,7 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uc.schemaConfig.AccessToken diff --git a/packages/shared/pkg/models/user_update.go b/packages/shared/pkg/models/user_update.go index c7558b6..cb642ff 100644 --- a/packages/shared/pkg/models/user_update.go +++ b/packages/shared/pkg/models/user_update.go @@ -80,14 +80,14 @@ func (uu *UserUpdate) AddCreatedEnvs(e ...*Env) *UserUpdate { } // AddAccessTokenIDs adds the "access_tokens" edge to the AccessToken entity by IDs. -func (uu *UserUpdate) AddAccessTokenIDs(ids ...string) *UserUpdate { +func (uu *UserUpdate) AddAccessTokenIDs(ids ...uuid.UUID) *UserUpdate { uu.mutation.AddAccessTokenIDs(ids...) return uu } // AddAccessTokens adds the "access_tokens" edges to the AccessToken entity. func (uu *UserUpdate) AddAccessTokens(a ...*AccessToken) *UserUpdate { - ids := make([]string, len(a)) + ids := make([]uuid.UUID, len(a)) for i := range a { ids[i] = a[i].ID } @@ -178,14 +178,14 @@ func (uu *UserUpdate) ClearAccessTokens() *UserUpdate { } // RemoveAccessTokenIDs removes the "access_tokens" edge to AccessToken entities by IDs. -func (uu *UserUpdate) RemoveAccessTokenIDs(ids ...string) *UserUpdate { +func (uu *UserUpdate) RemoveAccessTokenIDs(ids ...uuid.UUID) *UserUpdate { uu.mutation.RemoveAccessTokenIDs(ids...) return uu } // RemoveAccessTokens removes "access_tokens" edges to AccessToken entities. 
func (uu *UserUpdate) RemoveAccessTokens(a ...*AccessToken) *UserUpdate { - ids := make([]string, len(a)) + ids := make([]uuid.UUID, len(a)) for i := range a { ids[i] = a[i].ID } @@ -408,7 +408,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uu.schemaConfig.AccessToken @@ -422,7 +422,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uu.schemaConfig.AccessToken @@ -439,7 +439,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uu.schemaConfig.AccessToken @@ -613,14 +613,14 @@ func (uuo *UserUpdateOne) AddCreatedEnvs(e ...*Env) *UserUpdateOne { } // AddAccessTokenIDs adds the "access_tokens" edge to the AccessToken entity by IDs. -func (uuo *UserUpdateOne) AddAccessTokenIDs(ids ...string) *UserUpdateOne { +func (uuo *UserUpdateOne) AddAccessTokenIDs(ids ...uuid.UUID) *UserUpdateOne { uuo.mutation.AddAccessTokenIDs(ids...) return uuo } // AddAccessTokens adds the "access_tokens" edges to the AccessToken entity. 
func (uuo *UserUpdateOne) AddAccessTokens(a ...*AccessToken) *UserUpdateOne { - ids := make([]string, len(a)) + ids := make([]uuid.UUID, len(a)) for i := range a { ids[i] = a[i].ID } @@ -711,14 +711,14 @@ func (uuo *UserUpdateOne) ClearAccessTokens() *UserUpdateOne { } // RemoveAccessTokenIDs removes the "access_tokens" edge to AccessToken entities by IDs. -func (uuo *UserUpdateOne) RemoveAccessTokenIDs(ids ...string) *UserUpdateOne { +func (uuo *UserUpdateOne) RemoveAccessTokenIDs(ids ...uuid.UUID) *UserUpdateOne { uuo.mutation.RemoveAccessTokenIDs(ids...) return uuo } // RemoveAccessTokens removes "access_tokens" edges to AccessToken entities. func (uuo *UserUpdateOne) RemoveAccessTokens(a ...*AccessToken) *UserUpdateOne { - ids := make([]string, len(a)) + ids := make([]uuid.UUID, len(a)) for i := range a { ids[i] = a[i].ID } @@ -971,7 +971,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uuo.schemaConfig.AccessToken @@ -985,7 +985,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uuo.schemaConfig.AccessToken @@ -1002,7 +1002,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AccessTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeString), + IDSpec: sqlgraph.NewFieldSpec(accesstoken.FieldID, field.TypeUUID), }, } edge.Schema = uuo.schemaConfig.AccessToken diff --git 
a/packages/shared/pkg/proxy/handler.go b/packages/shared/pkg/proxy/handler.go new file mode 100644 index 0000000..d476306 --- /dev/null +++ b/packages/shared/pkg/proxy/handler.go @@ -0,0 +1,92 @@ +package proxy + +import ( + "context" + "errors" + "fmt" + "net/http" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/pool" + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/template" +) + +type ErrInvalidHost struct{} + +func (e *ErrInvalidHost) Error() string { + return "invalid url host" +} + +type ErrInvalidSandboxPort struct{} + +func (e *ErrInvalidSandboxPort) Error() string { + return "invalid sandbox port" +} + +func NewErrSandboxNotFound(sandboxId string) *ErrSandboxNotFound { + return &ErrSandboxNotFound{ + SandboxId: sandboxId, + } +} + +type ErrSandboxNotFound struct { + SandboxId string +} + +func (e *ErrSandboxNotFound) Error() string { + return "sandbox not found" +} + +func handler(p *pool.ProxyPool, getDestination func(r *http.Request) (*pool.Destination, error)) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d, err := getDestination(r) + + var invalidHostErr *ErrInvalidHost + if errors.As(err, &invalidHostErr) { + zap.L().Warn("invalid host", zap.String("host", r.Host)) + http.Error(w, "Invalid host", http.StatusBadRequest) + + return + } + + var invalidPortErr *ErrInvalidSandboxPort + if errors.As(err, &invalidPortErr) { + zap.L().Warn("invalid sandbox port", zap.String("host", r.Host)) + http.Error(w, "Invalid sandbox port", http.StatusBadRequest) + + return + } + + var notFoundErr *ErrSandboxNotFound + if errors.As(err, ¬FoundErr) { + zap.L().Warn("sandbox not found", zap.String("host", r.Host)) + + err := template. + NewSandboxNotFoundError(notFoundErr.SandboxId, r.Host). 
+ HandleError(w, r) + if err != nil { + zap.L().Error("failed to handle sandbox not found error", zap.Error(err)) + http.Error(w, "Failed to handle sandbox not found error", http.StatusInternalServerError) + + return + } + + return + } + + if err != nil { + zap.L().Error("failed to route request", zap.Error(err), zap.String("host", r.Host)) + http.Error(w, fmt.Sprintf("Unexpected error when routing request: %s", err), http.StatusInternalServerError) + + return + } + + d.RequestLogger.Debug("proxying request") + + ctx := context.WithValue(r.Context(), pool.DestinationContextKey{}, d) + + proxy := p.Get(d) + proxy.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/packages/shared/pkg/proxy/host.go b/packages/shared/pkg/proxy/host.go new file mode 100644 index 0000000..43b91d6 --- /dev/null +++ b/packages/shared/pkg/proxy/host.go @@ -0,0 +1,23 @@ +package proxy + +import ( + "strconv" + "strings" +) + +func ParseHost(host string) (sandboxID string, port uint64, err error) { + hostParts := strings.Split(host, "-") + if len(hostParts) < 2 { + return "", 0, &ErrInvalidHost{} + } + + sandboxPortString := hostParts[0] + sandboxID = hostParts[1] + + sandboxPort, err := strconv.ParseUint(sandboxPortString, 10, 64) + if err != nil { + return "", 0, &ErrInvalidSandboxPort{} + } + + return sandboxID, sandboxPort, nil +} diff --git a/packages/shared/pkg/proxy/pool/client.go b/packages/shared/pkg/proxy/pool/client.go new file mode 100644 index 0000000..1ca4f55 --- /dev/null +++ b/packages/shared/pkg/proxy/pool/client.go @@ -0,0 +1,141 @@ +package pool + +import ( + "context" + "log" + "net" + "net/http" + "net/http/httputil" + "sync/atomic" + "time" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/template" + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/tracking" +) + +type proxyClient struct { + httputil.ReverseProxy + transport *http.Transport +} + +func newProxyClient( + maxIdleConns, + maxHostIdleConns int, + idleTimeout time.Duration, + 
totalConnsCounter *atomic.Uint64, + currentConnsCounter *atomic.Int64, + logger *log.Logger, +) *proxyClient { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + // Limit the max connection per host to avoid exhausting the number of available ports to one host. + MaxIdleConnsPerHost: maxHostIdleConns, + MaxIdleConns: maxIdleConns, + IdleConnTimeout: idleTimeout, + TLSHandshakeTimeout: 0, + ResponseHeaderTimeout: 0, + // TCP configuration + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := (&net.Dialer{ + Timeout: 30 * time.Second, // Connect timeout (no timeout by default) + KeepAlive: 20 * time.Second, // Lower than our http keepalives (50 seconds) + }).DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + + totalConnsCounter.Add(1) + + return tracking.NewConnection(conn, currentConnsCounter), nil + }, + DisableCompression: true, // No need to request or manipulate compression + } + + return &proxyClient{ + transport: transport, + ReverseProxy: httputil.ReverseProxy{ + Transport: transport, + Rewrite: func(r *httputil.ProxyRequest) { + t, ok := r.In.Context().Value(DestinationContextKey{}).(*Destination) + if !ok { + zap.L().Error("failed to get routing destination from context") + + // Error from this will be later caught as r.Host == "" in the ErrorHandler + r.SetURL(r.In.URL) + + return + } + + r.SetURL(t.Url) + // We are **not** using SetXForwarded() because servers can sometimes modify the content-location header to be http which might break some customer services. 
+ r.Out.Host = r.In.Host + }, + ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { + if r.Host == "" { + zap.L().Error("error handler called from rewrite because of missing DestinationContext", zap.Error(err)) + + http.Error(w, "Failed to route request to sandbox", http.StatusInternalServerError) + + return + } + + t, ok := r.Context().Value(DestinationContextKey{}).(*Destination) + if !ok { + zap.L().Error("failed to get routing destination from context") + + http.Error(w, "Failed to route request to sandbox", http.StatusInternalServerError) + + return + } + + if t.DefaultToPortError { + err = template. + NewPortClosedError(t.SandboxId, r.Host, t.SandboxPort). + HandleError(w, r) + if err != nil { + zap.L().Error("failed to handle error", zap.Error(err)) + + http.Error(w, "Failed to handle closed port error", http.StatusInternalServerError) + + return + } + + return + } + + zap.L().Error("sandbox error handler called", zap.Error(err)) + + http.Error(w, "Failed to route request to sandbox", http.StatusBadGateway) + }, + ModifyResponse: func(r *http.Response) error { + t, ok := r.Request.Context().Value(DestinationContextKey{}).(*Destination) + if !ok { + zap.L().Error("failed to get routing target from context") + + return nil + } + + if r.StatusCode >= 500 { + t.RequestLogger.Error( + "Reverse proxy error", + zap.Int("status_code", r.StatusCode), + ) + } else { + t.RequestLogger.Debug("Reverse proxy response", + zap.Int("status_code", r.StatusCode), + ) + } + + return nil + }, + // Ideally we would add info about sandbox to each error log, but there is no easy way right now. 
+ ErrorLog: logger, + }, + } +} + +func (p *proxyClient) closeIdleConnections() { + p.transport.CloseIdleConnections() +} diff --git a/packages/shared/pkg/proxy/pool/destination.go b/packages/shared/pkg/proxy/pool/destination.go new file mode 100644 index 0000000..0ddfa9a --- /dev/null +++ b/packages/shared/pkg/proxy/pool/destination.go @@ -0,0 +1,23 @@ +package pool + +import ( + "net/url" + + "go.uber.org/zap" +) + +type DestinationContextKey struct{} + +// Destination contains information about where to route the request. +type Destination struct { + Url *url.URL + SandboxId string + SandboxPort uint64 + // Should we return the error about closed port if there is a problem with a connection to upstream? + DefaultToPortError bool + RequestLogger *zap.Logger + // ConnectionKey is used for identifying which keepalive connections are not the same so we can prevent unintended reuse. + // This is evaluated before checking for existing connection to the IP:port pair. + ConnectionKey string + IncludeSandboxIdInProxyErrorLogger bool +} diff --git a/packages/shared/pkg/proxy/pool/pool.go b/packages/shared/pkg/proxy/pool/pool.go new file mode 100644 index 0000000..45558ad --- /dev/null +++ b/packages/shared/pkg/proxy/pool/pool.go @@ -0,0 +1,90 @@ +package pool + +import ( + "sync/atomic" + "time" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + "github.com/e2b-dev/infra/packages/shared/pkg/smap" +) + +// hostConnectionSplit is used for splitting the total number of connections between the hosts. +// This is used to limit the number of connections per host to avoid exhausting the number of available via one host. +// The total number of connection per host will be total connections / hostConnectionSplit. +// If the total connections is lower than hostConnectionSplit, the total connections will be used for each host. 
+const hostConnectionSplit = 4 + +type ProxyPool struct { + pool *smap.Map[*proxyClient] + sizePerConnectionKey int + maxClientConns int + idleTimeout time.Duration + totalConnsCounter atomic.Uint64 + currentConnsCounter atomic.Int64 +} + +func New(maxClientConns int, idleTimeout time.Duration) *ProxyPool { + return &ProxyPool{ + pool: smap.New[*proxyClient](), + maxClientConns: maxClientConns, + idleTimeout: idleTimeout, + } +} + +func (p *ProxyPool) Get(d *Destination) *proxyClient { + return p.pool.Upsert(d.ConnectionKey, nil, func(exist bool, inMapValue *proxyClient, newValue *proxyClient) *proxyClient { + if exist && inMapValue != nil { + return inMapValue + } + + withFields := make([]zap.Field, 0) + if d.IncludeSandboxIdInProxyErrorLogger { + withFields = append(withFields, logger.WithSandboxID(d.SandboxId)) + } + + logger, err := zap.NewStdLogAt(zap.L().With(withFields...), zap.ErrorLevel) + if err != nil { + zap.L().Warn("failed to create logger", zap.Error(err)) + } + + return newProxyClient( + p.maxClientConns, + // We limit the max number of connections per host to avoid exhausting the number of available via one host. 
+ func() int { + if p.maxClientConns <= hostConnectionSplit { + return p.maxClientConns + } + + return p.maxClientConns / hostConnectionSplit + }(), + p.idleTimeout, + &p.totalConnsCounter, + &p.currentConnsCounter, + logger, + ) + }) +} + +func (p *ProxyPool) Close(connectionKey string) { + p.pool.RemoveCb(connectionKey, func(key string, proxy *proxyClient, exists bool) bool { + if proxy != nil { + proxy.closeIdleConnections() + } + + return true + }) +} + +func (p *ProxyPool) TotalConnections() uint64 { + return p.totalConnsCounter.Load() +} + +func (p *ProxyPool) CurrentConnections() int64 { + return p.currentConnsCounter.Load() +} + +func (p *ProxyPool) Size() int { + return p.pool.Count() +} diff --git a/packages/shared/pkg/proxy/proxy.go b/packages/shared/pkg/proxy/proxy.go new file mode 100644 index 0000000..5f610f0 --- /dev/null +++ b/packages/shared/pkg/proxy/proxy.go @@ -0,0 +1,81 @@ +package proxy + +import ( + "fmt" + "net" + "net/http" + "sync/atomic" + "time" + + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/pool" + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/tracking" +) + +const ( + maxClientConns = 16384 // Reasonably big number that is lower than the number of available ports. 
+ idleTimeoutBufferUpstreamDownstream = 10 +) + +type Proxy struct { + http.Server + pool *pool.ProxyPool + currentServerConnsCounter atomic.Int64 +} + +func New( + port uint, + idleTimeout time.Duration, + getDestination func(r *http.Request) (*pool.Destination, error), +) *Proxy { + p := pool.New( + maxClientConns, + idleTimeout, + ) + + return &Proxy{ + Server: http.Server{ + Addr: fmt.Sprintf(":%d", port), + ReadTimeout: 0, + WriteTimeout: 0, + // Downstream idle timeout (client facing) > upstream idle timeout (server facing) + // otherwise there's a chance for a race condition when the server closes and the client tries to use the connection + IdleTimeout: idleTimeout + idleTimeoutBufferUpstreamDownstream, + ReadHeaderTimeout: 0, + Handler: handler(p, getDestination), + }, + pool: p, + } +} + +func (p *Proxy) TotalPoolConnections() uint64 { + return p.pool.TotalConnections() +} + +func (p *Proxy) CurrentServerConnections() int64 { + return p.currentServerConnsCounter.Load() +} + +func (p *Proxy) CurrentPoolSize() int { + return p.pool.Size() +} + +func (p *Proxy) CurrentPoolConnections() int64 { + return p.pool.CurrentConnections() +} + +func (p *Proxy) RemoveFromPool(connectionKey string) { + p.pool.Close(connectionKey) +} + +func (p *Proxy) ListenAndServe() error { + l, err := net.Listen("tcp", p.Addr) + if err != nil { + return err + } + + return p.Serve(l) +} + +func (p *Proxy) Serve(l net.Listener) error { + return p.Server.Serve(tracking.NewListener(l, &p.currentServerConnsCounter)) +} diff --git a/packages/shared/pkg/proxy/proxy_test.go b/packages/shared/pkg/proxy/proxy_test.go new file mode 100644 index 0000000..da4c75e --- /dev/null +++ b/packages/shared/pkg/proxy/proxy_test.go @@ -0,0 +1,407 @@ +package proxy + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "sync" + "sync/atomic" + "testing" + "time" + + "go.uber.org/zap" + "gotest.tools/assert" + + "github.com/e2b-dev/infra/packages/shared/pkg/proxy/pool" +) + +// 
testBackend represents a test backend server +type testBackend struct { + server *http.Server + listener net.Listener + url *url.URL + requestCount *atomic.Uint64 + id string + cancel context.CancelFunc +} + +func (b *testBackend) RequestCount() uint64 { + return b.requestCount.Load() +} + +// newTestBackend creates a new test backend server +func newTestBackend(listener net.Listener, id string) (*testBackend, error) { + var requestCount atomic.Uint64 + + ctx, cancel := context.WithCancel(context.Background()) + + backend := &testBackend{ + server: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + select { + case <-ctx.Done(): + w.WriteHeader(http.StatusBadGateway) + + return + default: + } + + requestCount.Add(1) + + w.WriteHeader(http.StatusOK) + w.Write([]byte(id)) + }), + }, + listener: listener, + requestCount: &requestCount, + id: id, + cancel: cancel, + } + + // Start the server + go backend.server.Serve(backend.listener) + + // Parse the URL + backendURL, err := url.Parse(fmt.Sprintf("http://%s", listener.Addr().String())) + if err != nil { + listener.Close() + return nil, fmt.Errorf("failed to parse backend URL: %v", err) + } + backend.url = backendURL + + return backend, nil +} + +// Interrupt closes the listener. +// We close the listener directly because we want to simulate ungraceful shutdown of the backend +// that happens when a sandbox is killed. +func (b *testBackend) Interrupt() error { + var errs []error + err := b.listener.Close() + if err != nil { + errs = append(errs, err) + } + + b.cancel() + + return errors.Join(errs...) 
+} + +func (b *testBackend) Close() error { + return b.server.Close() +} + +func assertBackendOutput(t *testing.T, backend *testBackend, resp *http.Response) { + assert.Equal(t, resp.StatusCode, http.StatusOK, "status code should be 200") + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + assert.Equal(t, string(body), backend.id, "backend id should be the same") +} + +// newTestProxy creates a new proxy server for testing +func newTestProxy(getDestination func(r *http.Request) (*pool.Destination, error)) (*Proxy, uint, error) { + // Find a free port for the proxy + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, 0, fmt.Errorf("failed to get free port: %v", err) + } + port := l.Addr().(*net.TCPAddr).Port + + // Set up the proxy server + proxy := New( + uint(port), + 20*time.Second, // Short idle timeout + getDestination, + ) + + // Start the proxy server + go func() { + proxy.Serve(l) + }() + + return proxy, uint(port), nil +} + +func TestProxyRoutesToTargetServer(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + backend, err := newTestBackend(listener, "backend-1") + if err != nil { + t.Fatalf("failed to create backend: %v", err) + } + defer backend.Close() + + // Set up a routing function that always returns the backend + getDestination := func(r *http.Request) (*pool.Destination, error) { + return &pool.Destination{ + Url: backend.url, + SandboxId: "test-sandbox", + RequestLogger: zap.NewNop(), + ConnectionKey: backend.id, + }, nil + } + + proxy, port, err := newTestProxy(getDestination) + if err != nil { + t.Fatalf("failed to create proxy: %v", err) + } + defer proxy.Close() + + assert.Equal(t, proxy.TotalPoolConnections(), uint64(0)) + assert.Equal(t, backend.RequestCount(), uint64(0)) + + // Make a request to the proxy + proxyURL := fmt.Sprintf("http://127.0.0.1:%d/hello", port) + 
resp, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy: %v", err) + } + defer resp.Body.Close() + + assertBackendOutput(t, backend, resp) + + assert.Equal(t, backend.RequestCount(), uint64(1), "backend should have been called once") + assert.Equal(t, proxy.TotalPoolConnections(), uint64(1), "proxy should have established one connection") +} + +func TestProxyReusesConnections(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + backend, err := newTestBackend(listener, "backend-1") + if err != nil { + t.Fatalf("failed to create backend: %v", err) + } + defer backend.Close() + + // Set up a routing function that always returns the backend + getDestination := func(r *http.Request) (*pool.Destination, error) { + return &pool.Destination{ + Url: backend.url, + SandboxId: "test-sandbox", + RequestLogger: zap.NewNop(), + ConnectionKey: backend.id, + }, nil + } + + proxy, port, err := newTestProxy(getDestination) + if err != nil { + t.Fatalf("failed to create proxy: %v", err) + } + defer proxy.Close() + + // Make two requests to the proxy + proxyURL := fmt.Sprintf("http://127.0.0.1:%d/hello", port) + + // First request + resp1, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (first request): %v", err) + } + defer resp1.Body.Close() + + assertBackendOutput(t, backend, resp1) + + // Second request + resp2, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (second request): %v", err) + } + defer resp2.Body.Close() + + assertBackendOutput(t, backend, resp2) + + // Verify that only one connection was established + assert.Equal(t, backend.RequestCount(), uint64(2), "backend should have been called twice") + assert.Equal(t, proxy.TotalPoolConnections(), uint64(1), "proxy should have used one connection") +} + +// This is a test that verify that the proxy reuse fails when the backend changes. 
+func TestProxyReuseConnectionsWhenBackendChangesFails(t *testing.T) { + // Create first backend + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + backend1, err := newTestBackend(listener, "backend-1") + if err != nil { + t.Fatalf("failed to create first backend: %v", err) + } + defer backend1.Close() + + // Get the address of the first backend + backendAddr := backend1.listener.Addr().String() + + backendMapping := map[string]string{ + backendAddr: backend1.id, + } + var backendMappingMutex sync.Mutex + + // Set up a routing function that returns the current backend + getDestination := func(r *http.Request) (*pool.Destination, error) { + backendMappingMutex.Lock() + defer backendMappingMutex.Unlock() + + backendKey, ok := backendMapping[backendAddr] + if !ok { + return nil, fmt.Errorf("backend not found") + } + + return &pool.Destination{ + Url: backend1.url, + SandboxId: "backend1", + RequestLogger: zap.NewNop(), + ConnectionKey: backendKey, + }, nil + } + + // Create proxy with the initial routing function + proxy, port, err := newTestProxy(getDestination) + if err != nil { + t.Fatalf("failed to create proxy: %v", err) + } + defer proxy.Close() + + proxyURL := fmt.Sprintf("http://127.0.0.1:%d/hello", port) + + // Make request to first backend + resp1, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (first request): %v", err) + } + defer resp1.Body.Close() + + assertBackendOutput(t, backend1, resp1) + + assert.Equal(t, proxy.TotalPoolConnections(), uint64(1), "proxy should have used one connection") + assert.Equal(t, backend1.RequestCount(), uint64(1), "first backend should have been called once") + + // Close the first backend + backend1.Interrupt() + + // Create second backend on the same address + listener, err = net.Listen("tcp", backendAddr) + if err != nil { + t.Fatalf("failed to create listener for second backend: %v", err) + } + + backend2, err := 
newTestBackend(listener, "backend-2") + if err != nil { + t.Fatalf("failed to create second backend: %v", err) + } + defer backend2.Close() + + // Make request to second backend + resp2, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (second request): %v", err) + } + defer resp2.Body.Close() + + assert.Equal(t, resp2.StatusCode, http.StatusBadGateway, "status code should be 502") +} + +func TestProxyDoesNotReuseConnectionsWhenBackendChanges(t *testing.T) { + // Create first backend + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("failed to create listener: %v", err) + } + + backend1, err := newTestBackend(listener, "backend-1") + if err != nil { + t.Fatalf("failed to create first backend: %v", err) + } + defer backend1.Close() + + // Get the address of the first backend + backendAddr := backend1.listener.Addr().String() + + backendMapping := map[string]string{ + backendAddr: backend1.id, + } + var backendMappingMutex sync.Mutex + + // Set up a routing function that returns the current backend + getDestination := func(r *http.Request) (*pool.Destination, error) { + backendMappingMutex.Lock() + defer backendMappingMutex.Unlock() + + backendKey, ok := backendMapping[backendAddr] + if !ok { + return nil, fmt.Errorf("backend not found") + } + + return &pool.Destination{ + Url: backend1.url, + SandboxId: "backend1", + RequestLogger: zap.NewNop(), + ConnectionKey: backendKey, + }, nil + } + + // Create proxy with the initial routing function + proxy, port, err := newTestProxy(getDestination) + if err != nil { + t.Fatalf("failed to create proxy: %v", err) + } + defer proxy.Close() + + proxyURL := fmt.Sprintf("http://127.0.0.1:%d/hello", port) + + // Make request to first backend + resp1, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (first request): %v", err) + } + defer resp1.Body.Close() + + assertBackendOutput(t, backend1, resp1) + + assert.Equal(t, 
proxy.TotalPoolConnections(), uint64(1), "proxy should have reused the connection") + assert.Equal(t, backend1.RequestCount(), uint64(1), "first backend should have been called once") + + // Close the first backend + backend1.Interrupt() + + // Create second backend on the same address + listener, err = net.Listen("tcp", backendAddr) + if err != nil { + t.Fatalf("failed to create listener for second backend: %v", err) + } + + backend2, err := newTestBackend(listener, "backend-2") + if err != nil { + t.Fatalf("failed to create second backend: %v", err) + } + defer backend2.Close() + + backendMappingMutex.Lock() + backendMapping[backend2.listener.Addr().String()] = backend2.id + backendMappingMutex.Unlock() + + // Make request to second backend + resp2, err := http.Get(proxyURL) + if err != nil { + t.Fatalf("failed to GET from proxy (second request): %v", err) + } + defer resp2.Body.Close() + + assertBackendOutput(t, backend2, resp2) + + assert.Equal(t, backend2.RequestCount(), uint64(1), "second backend should have been called once") + assert.Equal(t, backend1.RequestCount(), uint64(1), "first backend should have been called once") + assert.Equal(t, proxy.TotalPoolConnections(), uint64(2), "proxy should not have reused the connection") +} diff --git a/packages/shared/pkg/proxy/template/browser_port_closed.html b/packages/shared/pkg/proxy/template/browser_port_closed.html new file mode 100644 index 0000000..f8f5491 --- /dev/null +++ b/packages/shared/pkg/proxy/template/browser_port_closed.html @@ -0,0 +1,23 @@ + + + + + Closed Port Error + + + +
+ +
+

Closed Port Error

+

The sandbox {{.SandboxId}} is running but there's no service running on port {{.Port}}.

+
+
+ {{.Host}} +
Connection refused on port {{.Port}}
+
+

Please ensure that your service is properly configured and running on the specified port.

+ Check the sandbox logs for more information → +
+ + \ No newline at end of file diff --git a/packages/shared/pkg/proxy/template/browser_sandbox_not_found.html b/packages/shared/pkg/proxy/template/browser_sandbox_not_found.html new file mode 100644 index 0000000..39da36e --- /dev/null +++ b/packages/shared/pkg/proxy/template/browser_sandbox_not_found.html @@ -0,0 +1,17 @@ + + + + + Sandbox Not Found + + + +
+ +
+

Sandbox Not Found

+

The sandbox {{.SandboxId}} wasn't found.

+
+
+ + \ No newline at end of file diff --git a/packages/shared/pkg/proxy/template/port_closed.go b/packages/shared/pkg/proxy/template/port_closed.go new file mode 100644 index 0000000..d0a28fe --- /dev/null +++ b/packages/shared/pkg/proxy/template/port_closed.go @@ -0,0 +1,36 @@ +package template + +import ( + _ "embed" + "html/template" + "net/http" +) + +//go:embed browser_port_closed.html +var portClosedHtml string +var portClosedHtmlTemplate = template.Must(template.New("portClosedHtml").Parse(portClosedHtml)) + +type portClosedError struct { + SandboxId string `json:"sandboxId"` + Message string `json:"message"` + Port uint64 `json:"port"` + Code int `json:"code"` + Host string `json:"-"` +} + +func (e portClosedError) StatusCode() int { + return e.Code +} + +func NewPortClosedError(sandboxId, host string, port uint64) *TemplatedError[portClosedError] { + return &TemplatedError[portClosedError]{ + template: portClosedHtmlTemplate, + vars: portClosedError{ + Message: "The sandbox is running but port is not open", + SandboxId: sandboxId, + Host: host, + Port: port, + Code: http.StatusBadGateway, + }, + } +} diff --git a/packages/shared/pkg/proxy/template/sandbox_not_found.go b/packages/shared/pkg/proxy/template/sandbox_not_found.go new file mode 100644 index 0000000..f066da9 --- /dev/null +++ b/packages/shared/pkg/proxy/template/sandbox_not_found.go @@ -0,0 +1,34 @@ +package template + +import ( + _ "embed" + "html/template" + "net/http" +) + +//go:embed browser_sandbox_not_found.html +var sandboxNotFoundHtml string +var sandboxNotFoundHtmlTemplate = template.Must(template.New("sandboxNotFoundHtml").Parse(sandboxNotFoundHtml)) + +type sandboxNotFoundData struct { + SandboxId string `json:"sandboxId"` + Message string `json:"message"` + Code int `json:"code"` + Host string `json:"-"` +} + +func (e sandboxNotFoundData) StatusCode() int { + return e.Code +} + +func NewSandboxNotFoundError(sandboxId, host string) *TemplatedError[sandboxNotFoundData] { + return 
&TemplatedError[sandboxNotFoundData]{ + template: sandboxNotFoundHtmlTemplate, + vars: sandboxNotFoundData{ + SandboxId: sandboxId, + Message: "The sandbox was not found", + Host: host, + Code: http.StatusBadGateway, + }, + } +} diff --git a/packages/shared/pkg/proxy/template/template.go b/packages/shared/pkg/proxy/template/template.go new file mode 100644 index 0000000..1a4ed83 --- /dev/null +++ b/packages/shared/pkg/proxy/template/template.go @@ -0,0 +1,80 @@ +package template + +import ( + "bytes" + "encoding/json" + "fmt" + "html/template" + "net/http" + "regexp" +) + +var browserRegex = regexp.MustCompile(`(?i)mozilla|chrome|safari|firefox|edge|opera|msie`) + +type jsonErrorMessage interface { + StatusCode() int +} + +type TemplatedError[T jsonErrorMessage] struct { + template *template.Template + vars T +} + +func (e *TemplatedError[T]) buildHtml() ([]byte, error) { + html := new(bytes.Buffer) + + err := e.template.Execute(html, e.vars) + if err != nil { + return nil, err + } + + return html.Bytes(), nil +} + +func (e *TemplatedError[T]) buildJson() ([]byte, error) { + return json.Marshal(e.vars) +} + +func (e *TemplatedError[T]) HandleError( + w http.ResponseWriter, + r *http.Request, +) error { + if e.vars.StatusCode() <= 0 { + return fmt.Errorf("invalid status code: %d", e.vars.StatusCode()) + } + + if isBrowser(r) { + body, buildErr := e.buildHtml() + if buildErr != nil { + return buildErr + } + + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(e.vars.StatusCode()) + _, writeErr := w.Write(body) + if writeErr != nil { + return writeErr + } + + return nil + } + + body, buildErr := e.buildJson() + if buildErr != nil { + return buildErr + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(e.vars.StatusCode()) + + _, writeErr := w.Write(body) + if writeErr != nil { + return writeErr + } + + return nil +} + +func isBrowser(r *http.Request) bool { + return browserRegex.MatchString(r.UserAgent()) +} 
diff --git a/packages/shared/pkg/proxy/tracking/connection.go b/packages/shared/pkg/proxy/tracking/connection.go new file mode 100644 index 0000000..f7e4e03 --- /dev/null +++ b/packages/shared/pkg/proxy/tracking/connection.go @@ -0,0 +1,31 @@ +package tracking + +import ( + "net" + "sync/atomic" +) + +type Connection struct { + net.Conn + counter *atomic.Int64 +} + +func NewConnection(conn net.Conn, counter *atomic.Int64) *Connection { + counter.Add(1) + + return &Connection{ + Conn: conn, + counter: counter, + } +} + +func (c *Connection) Close() error { + err := c.Conn.Close() + if err != nil { + return err + } + + c.counter.Add(-1) + + return nil +} diff --git a/packages/shared/pkg/proxy/tracking/listener.go b/packages/shared/pkg/proxy/tracking/listener.go new file mode 100644 index 0000000..c8cb127 --- /dev/null +++ b/packages/shared/pkg/proxy/tracking/listener.go @@ -0,0 +1,27 @@ +package tracking + +import ( + "net" + "sync/atomic" +) + +type Listener struct { + net.Listener + counter *atomic.Int64 +} + +func NewListener(l net.Listener, counter *atomic.Int64) *Listener { + return &Listener{ + Listener: l, + counter: counter, + } +} + +func (l *Listener) Accept() (net.Conn, error) { + conn, err := l.Listener.Accept() + if err != nil { + return nil, err + } + + return NewConnection(conn, l.counter), nil +} diff --git a/packages/shared/pkg/schema/access_token.go b/packages/shared/pkg/schema/access_token.go index 56017ac..4885fc6 100644 --- a/packages/shared/pkg/schema/access_token.go +++ b/packages/shared/pkg/schema/access_token.go @@ -15,7 +15,16 @@ type AccessToken struct { func (AccessToken) Fields() []ent.Field { return []ent.Field{ - field.String("id").Unique().StorageKey("access_token").Immutable().SchemaType(map[string]string{dialect.Postgres: "text"}), + field.UUID("id", uuid.UUID{}).Immutable().Unique().Annotations(entsql.Default("gen_random_uuid()")), + 
field.String("access_token").Unique().Immutable().Sensitive().SchemaType(map[string]string{dialect.Postgres: "text"}), + field.String("access_token_hash").Immutable().Unique().Sensitive().SchemaType(map[string]string{dialect.Postgres: "text"}), + + field.String("access_token_prefix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(10)"}), + field.Int("access_token_length").Immutable(), + field.String("access_token_mask_prefix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(5)"}), + field.String("access_token_mask_suffix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(5)"}), + + field.String("name").SchemaType(map[string]string{dialect.Postgres: "text"}).Default("Unnamed Access Token"), field.UUID("user_id", uuid.UUID{}), field.Time("created_at").Optional().Immutable().Annotations( entsql.Default("CURRENT_TIMESTAMP"), diff --git a/packages/shared/pkg/schema/build.go b/packages/shared/pkg/schema/build.go index e640080..c2ff992 100644 --- a/packages/shared/pkg/schema/build.go +++ b/packages/shared/pkg/schema/build.go @@ -32,9 +32,10 @@ func (EnvBuild) Fields() []ent.Field { field.Time("updated_at").Default(time.Now), field.Time("finished_at").Optional().Nillable(), field.String("env_id").SchemaType(map[string]string{dialect.Postgres: "text"}).Optional().Nillable(), - field.Enum("status").Values("waiting", "building", "failed", "success", "uploaded").Default("waiting").SchemaType(map[string]string{dialect.Postgres: "text"}), + field.Enum("status").Values("waiting", "building", "snapshotting", "failed", "success", "uploaded").Default("waiting").SchemaType(map[string]string{dialect.Postgres: "text"}), field.String("dockerfile").SchemaType(map[string]string{dialect.Postgres: "text"}).Optional().Nillable(), field.String("start_cmd").SchemaType(map[string]string{dialect.Postgres: "text"}).Optional().Nillable(), + 
field.String("ready_cmd").SchemaType(map[string]string{dialect.Postgres: "text"}).Optional().Nillable(), field.Int64("vcpu"), field.Int64("ram_mb"), field.Int64("free_disk_size_mb"), @@ -42,6 +43,7 @@ func (EnvBuild) Fields() []ent.Field { field.String("kernel_version").Default(DefaultKernelVersion).SchemaType(map[string]string{dialect.Postgres: "text"}), field.String("firecracker_version").Default(DefaultFirecrackerVersion).SchemaType(map[string]string{dialect.Postgres: "text"}), field.String("envd_version").SchemaType(map[string]string{dialect.Postgres: "text"}).Nillable().Optional(), + field.String("cluster_node_id").SchemaType(map[string]string{dialect.Postgres: "text"}).Optional().Nillable(), } } diff --git a/packages/shared/pkg/schema/cluster.go b/packages/shared/pkg/schema/cluster.go new file mode 100644 index 0000000..fdf5066 --- /dev/null +++ b/packages/shared/pkg/schema/cluster.go @@ -0,0 +1,52 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/field" + "github.com/google/uuid" +) + +// Cluster holds the schema definition for the Cluster entity. +type Cluster struct { + ent.Schema +} + +// Fields of the Cluster. +func (Cluster) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}). + Immutable(). + Unique(). + Annotations(entsql.Default("gen_random_uuid()")), + field.String("endpoint"). + NotEmpty(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + field.Bool("endpoint_tls"). + Default(true), + field.String("token"). + NotEmpty(). + Sensitive(). 
+ SchemaType(map[string]string{dialect.Postgres: "text"}), + } +} + +func (Cluster) Edges() []ent.Edge { + return []ent.Edge{} +} + +func (Cluster) Annotations() []schema.Annotation { + withComments := true + + return []schema.Annotation{ + entsql.Annotation{WithComments: &withComments}, + } +} + +func (Cluster) Mixin() []ent.Mixin { + return []ent.Mixin{ + Mixin{}, + } +} diff --git a/packages/shared/pkg/schema/env.go b/packages/shared/pkg/schema/env.go index a0be6b8..031c6b1 100644 --- a/packages/shared/pkg/schema/env.go +++ b/packages/shared/pkg/schema/env.go @@ -30,6 +30,7 @@ func (Env) Fields() []ent.Field { field.Int32("build_count").Default(1), field.Int64("spawn_count").Default(0).Comment("Number of times the env was spawned"), field.Time("last_spawned_at").Optional().Comment("Timestamp of the last time the env was spawned"), + field.UUID("cluster_id", uuid.UUID{}).Optional().Nillable().SchemaType(map[string]string{dialect.Postgres: "uuid"}), } } diff --git a/packages/shared/pkg/schema/snapshots.go b/packages/shared/pkg/schema/snapshots.go index 80e807e..45c46d7 100644 --- a/packages/shared/pkg/schema/snapshots.go +++ b/packages/shared/pkg/schema/snapshots.go @@ -26,6 +26,8 @@ func (Snapshot) Fields() []ent.Field { field.String("env_id").SchemaType(map[string]string{dialect.Postgres: "text"}), field.String("sandbox_id").Unique().SchemaType(map[string]string{dialect.Postgres: "text"}), field.JSON("metadata", map[string]string{}).SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + field.Time("sandbox_started_at"), + field.Bool("env_secure").Default(false), } } diff --git a/packages/shared/pkg/schema/team.go b/packages/shared/pkg/schema/team.go index 1fea169..6c30b30 100644 --- a/packages/shared/pkg/schema/team.go +++ b/packages/shared/pkg/schema/team.go @@ -27,6 +27,7 @@ func (Team) Fields() []ent.Field { field.String("name").SchemaType(map[string]string{dialect.Postgres: "text"}), field.String("tier").SchemaType(map[string]string{dialect.Postgres: 
"text"}), field.String("email").MaxLen(255).SchemaType(map[string]string{dialect.Postgres: "character varying(255)"}), + field.UUID("cluster_id", uuid.UUID{}).Optional().Nillable().SchemaType(map[string]string{dialect.Postgres: "uuid"}), } } diff --git a/packages/shared/pkg/schema/team_api_key.go b/packages/shared/pkg/schema/team_api_key.go index 298bf4b..938b4fa 100644 --- a/packages/shared/pkg/schema/team_api_key.go +++ b/packages/shared/pkg/schema/team_api_key.go @@ -20,6 +20,13 @@ func (TeamAPIKey) Fields() []ent.Field { return []ent.Field{ field.UUID("id", uuid.UUID{}).Immutable().Unique().Annotations(entsql.Default("gen_random_uuid()")), field.String("api_key").Unique().Sensitive().SchemaType(map[string]string{dialect.Postgres: "character varying(44)"}), + field.String("api_key_hash").Unique().Sensitive().SchemaType(map[string]string{dialect.Postgres: "character varying(64)"}), + + field.String("api_key_prefix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(10)"}), + field.Int("api_key_length").Immutable(), + field.String("api_key_mask_prefix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(5)"}), + field.String("api_key_mask_suffix").Immutable().SchemaType(map[string]string{dialect.Postgres: "character varying(5)"}), + field.Time("created_at").Immutable().Default(time.Now).Annotations( entsql.Default("CURRENT_TIMESTAMP"), ), diff --git a/packages/shared/pkg/smap/smap.go b/packages/shared/pkg/smap/smap.go index 56e76cf..2937ef9 100644 --- a/packages/shared/pkg/smap/smap.go +++ b/packages/shared/pkg/smap/smap.go @@ -26,6 +26,10 @@ func (m *Map[V]) Insert(key string, value V) { m.m.Set(key, value) } +func (m *Map[V]) Upsert(key string, value V, cb cmap.UpsertCb[V]) V { + return m.m.Upsert(key, value, cb) +} + func (m *Map[V]) InsertIfAbsent(key string, value V) bool { return m.m.SetIfAbsent(key, value) } diff --git a/packages/shared/pkg/storage/gcs/bucket.go 
b/packages/shared/pkg/storage/gcs/bucket.go deleted file mode 100644 index 5c2c944..0000000 --- a/packages/shared/pkg/storage/gcs/bucket.go +++ /dev/null @@ -1,28 +0,0 @@ -package gcs - -import ( - "context" - "sync" - - "cloud.google.com/go/storage" - - "github.com/e2b-dev/infra/packages/shared/pkg/utils" -) - -type BucketHandle = storage.BucketHandle - -var getClient = sync.OnceValue(func() *storage.Client { - return utils.Must(newClient(context.Background())) -}) - -func newBucket(bucket string) *BucketHandle { - return getClient().Bucket(bucket) -} - -func getTemplateBucketName() string { - return utils.RequiredEnv("TEMPLATE_BUCKET_NAME", "bucket for storing template files") -} - -func GetTemplateBucket() *BucketHandle { - return newBucket(getTemplateBucketName()) -} diff --git a/packages/shared/pkg/storage/gcs/client.go b/packages/shared/pkg/storage/gcs/client.go deleted file mode 100644 index 742bee3..0000000 --- a/packages/shared/pkg/storage/gcs/client.go +++ /dev/null @@ -1,17 +0,0 @@ -package gcs - -import ( - "context" - "fmt" - - "cloud.google.com/go/storage" -) - -func newClient(ctx context.Context) (*storage.Client, error) { - client, err := storage.NewClient(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create GCS client: %w", err) - } - - return client, nil -} diff --git a/packages/shared/pkg/storage/gcs/dir.go b/packages/shared/pkg/storage/gcs/dir.go deleted file mode 100644 index b3d23a9..0000000 --- a/packages/shared/pkg/storage/gcs/dir.go +++ /dev/null @@ -1,33 +0,0 @@ -package gcs - -import ( - "context" - "fmt" - - "cloud.google.com/go/storage" - "google.golang.org/api/iterator" -) - -func RemoveDir(ctx context.Context, bucket *BucketHandle, dir string) error { - objects := bucket.Objects(ctx, &storage.Query{ - Prefix: dir + "/", - }) - - for { - object, err := objects.Next() - if err == iterator.Done { - break - } - - if err != nil { - return fmt.Errorf("error when iterating over template objects: %w", err) - } - - err = 
bucket.Object(object.Name).Delete(ctx) - if err != nil { - return fmt.Errorf("error when deleting template object: %w", err) - } - } - - return nil -} diff --git a/packages/shared/pkg/storage/gcs/object.go b/packages/shared/pkg/storage/gcs/object.go deleted file mode 100644 index 15ee6ac..0000000 --- a/packages/shared/pkg/storage/gcs/object.go +++ /dev/null @@ -1,174 +0,0 @@ -package gcs - -import ( - "context" - "errors" - "fmt" - "io" - "os/exec" - "time" - - "cloud.google.com/go/storage" - "github.com/googleapis/gax-go/v2" -) - -const ( - readTimeout = 10 * time.Second - operationTimeout = 5 * time.Second - bufferSize = 2 << 21 - initialBackoff = 10 * time.Millisecond - maxBackoff = 10 * time.Second - backoffMultiplier = 2 - maxAttempts = 10 -) - -type Object struct { - object *storage.ObjectHandle - ctx context.Context - name string -} - -func NewObject(ctx context.Context, bucket *storage.BucketHandle, objectPath string) *Object { - obj := bucket.Object(objectPath).Retryer( - storage.WithMaxAttempts(maxAttempts), - storage.WithBackoff(gax.Backoff{ - Initial: initialBackoff, - Max: maxBackoff, - Multiplier: backoffMultiplier, - }), - storage.WithPolicy(storage.RetryAlways), - ) - - return &Object{ - object: obj, - ctx: ctx, - name: objectPath, - } -} - -// Name returns the name of the object -func (o *Object) Name() string { - return o.name -} - -// Reader creates a reader for this object -func (o *Object) Reader(ctx context.Context) (io.ReadCloser, error) { - reader, err := o.object.NewReader(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create GCS reader: %w", err) - } - return reader, nil -} - -func (o *Object) WriteTo(dst io.Writer) (int64, error) { - ctx, cancel := context.WithTimeout(o.ctx, readTimeout) - defer cancel() - - reader, err := o.object.NewReader(ctx) - if err != nil { - return 0, fmt.Errorf("failed to create GCS reader: %w", err) - } - - defer reader.Close() - - b := make([]byte, bufferSize) - - n, err := io.CopyBuffer(dst, 
reader, b) - if err != nil { - return n, fmt.Errorf("failed to copy GCS object to writer: %w", err) - } - - return n, nil -} - -func (o *Object) ReadFrom(src io.Reader) (int64, error) { - w := o.object.NewWriter(o.ctx) - - n, err := io.Copy(w, src) - if err != nil && !errors.Is(err, io.EOF) { - return n, fmt.Errorf("failed to copy buffer to storage: %w", err) - } - - err = w.Close() - if err != nil { - return n, fmt.Errorf("failed to close GCS writer: %w", err) - } - - return n, nil -} - -func (o *Object) UploadWithCli(ctx context.Context, path string) error { - cmd := exec.CommandContext( - ctx, - "gcloud", - "storage", - "cp", - "--verbosity", - "error", - path, - fmt.Sprintf("gs://%s/%s", o.object.BucketName(), o.object.ObjectName()), - ) - - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("failed to upload file to GCS: %w\n%s", err, string(output)) - } - - return nil -} - -func (o *Object) ReadAt(b []byte, off int64) (n int, err error) { - ctx, cancel := context.WithTimeout(o.ctx, readTimeout) - defer cancel() - - // The file should not be gzip compressed - reader, err := o.object.NewRangeReader(ctx, off, int64(len(b))) - if err != nil { - return 0, fmt.Errorf("failed to create GCS reader: %w", err) - } - - defer reader.Close() - - for reader.Remain() > 0 { - nr, readErr := reader.Read(b[n:]) - n += nr - - if readErr == nil { - continue - } - - if errors.Is(readErr, io.EOF) { - break - } - - return n, fmt.Errorf("failed to read from GCS object: %w", readErr) - } - - return n, nil -} - -func (o *Object) Size(ctx ...context.Context) (int64, error) { - var useCtx context.Context - if len(ctx) > 0 { - useCtx = ctx[0] - } else { - useCtx = o.ctx - } - - useCtx, cancel := context.WithTimeout(useCtx, operationTimeout) - defer cancel() - - attrs, err := o.object.Attrs(useCtx) - if err != nil { - return 0, fmt.Errorf("failed to get GCS object (%s) attributes: %w", o.object.ObjectName(), err) - } - - return attrs.Size, nil -} - -func (o *Object) 
Delete() error { - ctx, cancel := context.WithTimeout(o.ctx, operationTimeout) - defer cancel() - - return o.object.Delete(ctx) -} diff --git a/packages/shared/pkg/storage/header/diff.go b/packages/shared/pkg/storage/header/diff.go index 38d61fb..31caa30 100644 --- a/packages/shared/pkg/storage/header/diff.go +++ b/packages/shared/pkg/storage/header/diff.go @@ -1,10 +1,14 @@ package header import ( + "bytes" + "context" "fmt" "io" "github.com/bits-and-blooms/bitset" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) const ( @@ -18,20 +22,63 @@ var ( EmptyBlock = make([]byte, RootfsBlockSize) ) -func CreateDiff(source io.ReaderAt, blockSize int64, dirty *bitset.BitSet, diff io.Writer) error { +func WriteDiffWithTrace(ctx context.Context, tracer trace.Tracer, source io.ReaderAt, blockSize int64, dirty *bitset.BitSet, diff io.Writer) (*DiffMetadata, error) { + _, childSpan := tracer.Start(ctx, "create-diff") + defer childSpan.End() + childSpan.SetAttributes(attribute.Int64("dirty.length", int64(dirty.Count()))) + childSpan.SetAttributes(attribute.Int64("block.size", blockSize)) + + return WriteDiff(source, blockSize, dirty, diff) +} + +func WriteDiff(source io.ReaderAt, blockSize int64, dirty *bitset.BitSet, diff io.Writer) (*DiffMetadata, error) { b := make([]byte, blockSize) + empty := bitset.New(0) + for i, e := dirty.NextSet(0); e; i, e = dirty.NextSet(i + 1) { _, err := source.ReadAt(b, int64(i)*blockSize) if err != nil { - return fmt.Errorf("error reading from source: %w", err) + return nil, fmt.Errorf("error reading from source: %w", err) + } + + // If the block is empty, we don't need to write it to the diff. + // Because we checked it does not equal to the base, so we keep it separately. 
+ isEmpty, err := IsEmptyBlock(b, blockSize) + if err != nil { + return nil, fmt.Errorf("error checking empty block: %w", err) + } + if isEmpty { + dirty.Clear(i) + empty.Set(i) + + continue } _, err = diff.Write(b) if err != nil { - return fmt.Errorf("error writing to diff: %w", err) + return nil, fmt.Errorf("error writing to diff: %w", err) } } - return nil + return &DiffMetadata{ + Dirty: dirty, + Empty: empty, + + BlockSize: blockSize, + }, nil +} + +func IsEmptyBlock(block []byte, blockSize int64) (bool, error) { + var emptyBuf []byte + switch blockSize { + case HugepageSize: + emptyBuf = EmptyHugePage + case RootfsBlockSize: + emptyBuf = EmptyBlock + default: + return false, fmt.Errorf("block size not supported: %d", blockSize) + } + + return bytes.Equal(block, emptyBuf), nil } diff --git a/packages/shared/pkg/storage/header/diff_test.go b/packages/shared/pkg/storage/header/diff_test.go new file mode 100644 index 0000000..9272301 --- /dev/null +++ b/packages/shared/pkg/storage/header/diff_test.go @@ -0,0 +1,199 @@ +package header + +import ( + "bytes" + "fmt" + "testing" + + "github.com/bits-and-blooms/bitset" + "github.com/stretchr/testify/assert" +) + +func createSource(blockSize int, blocksData []byte) []byte { + sourceSlice := make([]byte, blockSize*len(blocksData)) + for i, data := range blocksData { + sourceSlice[i*blockSize] = data + } + return sourceSlice +} + +func TestCreateDiff_Hugepage(t *testing.T) { + blockSize := HugepageSize + sourceSlice := createSource(blockSize, []byte{1, 0, 3, 4, 5}) + + source := bytes.NewReader(sourceSlice) + dirty := bitset.New(0) + dirty.Set(0) + dirty.Set(1) + dirty.Set(4) + + diff := bytes.NewBuffer(nil) + m, err := WriteDiff(source, int64(blockSize), dirty, diff) + assert.NoError(t, err) + + expectedDiffData := createSource(blockSize, []byte{1, 5}) + assert.Equal(t, expectedDiffData, diff.Bytes()) + + assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000000010.", m.Empty.DumpAsBits()) +} + 
+func TestCreateDiff_RootfsBlock(t *testing.T) { + blockSize := RootfsBlockSize + sourceSlice := createSource(blockSize, []byte{1, 0, 3, 4, 5}) + + source := bytes.NewReader(sourceSlice) + dirty := bitset.New(0) + dirty.Set(0) + dirty.Set(1) + dirty.Set(4) + + diff := bytes.NewBuffer(nil) + m, err := WriteDiff(source, int64(blockSize), dirty, diff) + assert.NoError(t, err) + + expectedDiffData := createSource(blockSize, []byte{1, 5}) + assert.Equal(t, expectedDiffData, diff.Bytes()) + + assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000000010.", m.Empty.DumpAsBits()) +} + +func TestCreateDiff_UnsupportedBlockSize(t *testing.T) { + blockSize := 42 + sourceSlice := createSource(blockSize, []byte{1, 0, 3, 4, 5}) + + source := bytes.NewReader(sourceSlice) + dirty := bitset.New(0) + dirty.Set(0) + dirty.Set(1) + dirty.Set(4) + + diff := bytes.NewBuffer(nil) + _, err := WriteDiff(source, int64(blockSize), dirty, diff) + + assert.Error(t, err) +} + +func TestCreateDiff_AllEmptyBlocks(t *testing.T) { + blockSize := HugepageSize + sourceSlice := createSource(blockSize, []byte{0, 0, 0, 0, 0}) + + source := bytes.NewReader(sourceSlice) + dirty := bitset.New(0) + dirty.Set(0) + dirty.Set(1) + dirty.Set(2) + dirty.Set(3) + dirty.Set(4) + + diff := bytes.NewBuffer(nil) + m, err := WriteDiff(source, int64(blockSize), dirty, diff) + assert.NoError(t, err) + + assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000011111.", m.Empty.DumpAsBits()) +} + +func TestCreateDiff_EmptyDirtyBitset(t *testing.T) { + blockSize := HugepageSize + sourceSlice := createSource(blockSize, []byte{1, 2, 3}) + + source := bytes.NewReader(sourceSlice) + dirty := bitset.New(0) + // No blocks are marked as dirty + + diff := bytes.NewBuffer(nil) + m, err := WriteDiff(source, int64(blockSize), dirty, diff) + assert.NoError(t, err) + + // Verify no data was written to diff + assert.Equal(t, 0, diff.Len()) + + assert.Equal(t, "", m.Dirty.DumpAsBits()) + 
assert.Equal(t, "", m.Empty.DumpAsBits()) +} + +type errorReader struct{} + +func (e errorReader) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("simulated read error") +} + +func TestCreateDiff_ReadError(t *testing.T) { + blockSize := HugepageSize + source := errorReader{} + + dirty := bitset.New(0) + dirty.Set(0) // Mark one block as dirty to trigger ReadAt + + diff := bytes.NewBuffer(nil) + _, err := WriteDiff(source, int64(blockSize), dirty, diff) + + // Verify that the error from ReadAt is propagated + assert.Error(t, err) + assert.Contains(t, err.Error(), "error reading from source") + assert.Contains(t, err.Error(), "simulated read error") +} + +// errorWriter implements io.Writer and always returns an error +type errorWriter struct{} + +func (e errorWriter) Write(p []byte) (n int, err error) { + return 0, fmt.Errorf("simulated write error") +} + +func TestCreateDiff_WriteError(t *testing.T) { + blockSize := HugepageSize + // Create a source with non-empty data to ensure Write is called + sourceSlice := createSource(blockSize, []byte{1, 2, 3}) + source := bytes.NewReader(sourceSlice) + + dirty := bitset.New(0) + dirty.Set(0) // Mark one block as dirty to trigger Write + + diff := errorWriter{} + _, err := WriteDiff(source, int64(blockSize), dirty, diff) + + // Verify that the error from Write is propagated + assert.Error(t, err) + assert.Contains(t, err.Error(), "error writing to diff") + assert.Contains(t, err.Error(), "simulated write error") +} + +func TestCreateDiff_LargeIndex(t *testing.T) { + blockSize := RootfsBlockSize + // Create a source that can handle large offsets + largeSource := &largeOffsetReader{ + data: []byte{42}, // Non-empty data to ensure it's not considered empty + } + + dirty := bitset.New(0) + // Set a very large index to test offset calculation + largeIndex := uint(1000000) + dirty.Set(largeIndex) + + diff := bytes.NewBuffer(nil) + m, err := WriteDiff(largeSource, int64(blockSize), dirty, diff) + 
assert.NoError(t, err) + + // Verify the large index is still marked as dirty + assert.True(t, m.Dirty.Test(largeIndex)) + assert.False(t, m.Empty.Test(largeIndex)) + + // Verify the data was written to diff + assert.Equal(t, blockSize, diff.Len()) + assert.Equal(t, byte(42), diff.Bytes()[0]) +} + +// largeOffsetReader implements io.ReaderAt and handles large offsets +type largeOffsetReader struct { + data []byte +} + +func (r *largeOffsetReader) ReadAt(p []byte, off int64) (n int, err error) { + // Always return the same data regardless of offset + copy(p, r.data) + // Fill the rest with zeros + for i := len(r.data); i < len(p); i++ { + p[i] = 0 + } + return len(p), nil +} diff --git a/packages/shared/pkg/storage/header/inspect.go b/packages/shared/pkg/storage/header/inspect.go index c2581fc..f1e56ac 100644 --- a/packages/shared/pkg/storage/header/inspect.go +++ b/packages/shared/pkg/storage/header/inspect.go @@ -90,22 +90,22 @@ func ValidateMappings(mappings []*BuildMap, size, blockSize uint64) error { for _, mapping := range mappings { if currentOffset != mapping.Offset { - return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ndoes not start at the correct offset: expected %d (block %d), got %d (block %d)\n", mapping.Format(blockSize), currentOffset, currentOffset/blockSize, mapping.Offset, mapping.Offset/blockSize) + return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ndoes not start at the correct offset: expected %d (block %d), got %d (block %d)", mapping.Format(blockSize), currentOffset, currentOffset/blockSize, mapping.Offset, mapping.Offset/blockSize) } if mapping.Length%blockSize != 0 { - return fmt.Errorf("mapping validation failed: the following mapping\n- %s\nhas an invalid length: %d. It should be a multiple of block size: %d\n", mapping.Format(blockSize), mapping.Length, blockSize) + return fmt.Errorf("mapping validation failed: the following mapping\n- %s\nhas an invalid length: %d. 
It should be a multiple of block size: %d", mapping.Format(blockSize), mapping.Length, blockSize) } if currentOffset+mapping.Length > size { - return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ngoes beyond the size: %d (current offset) + %d (length) > %d (size)\n", mapping.Format(blockSize), currentOffset, mapping.Length, size) + return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ngoes beyond the size: %d (current offset) + %d (length) > %d (size)", mapping.Format(blockSize), currentOffset, mapping.Length, size) } currentOffset += mapping.Length } if currentOffset != size { - return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ndoes not cover the whole size: %d (current offset) != %d (size)\n", mappings[len(mappings)-1].Format(blockSize), currentOffset, size) + return fmt.Errorf("mapping validation failed: the following mapping\n- %s\ndoes not cover the whole size: %d (current offset) != %d (size)", mappings[len(mappings)-1].Format(blockSize), currentOffset, size) } return nil diff --git a/packages/shared/pkg/storage/header/mapping.go b/packages/shared/pkg/storage/header/mapping.go index c1065e8..f7adbcf 100644 --- a/packages/shared/pkg/storage/header/mapping.go +++ b/packages/shared/pkg/storage/header/mapping.go @@ -20,9 +20,9 @@ type BuildMap struct { } func CreateMapping( - metadata *Metadata, buildId *uuid.UUID, dirty *bitset.BitSet, + blockSize int64, ) []*BuildMap { var mappings []*BuildMap @@ -39,9 +39,9 @@ func CreateMapping( if blockLength > 0 { m := &BuildMap{ - Offset: uint64(int64(startBlock) * int64(metadata.BlockSize)), + Offset: uint64(startBlock) * uint64(blockSize), BuildId: *buildId, - Length: uint64(blockLength) * uint64(metadata.BlockSize), + Length: uint64(blockLength) * uint64(blockSize), BuildStorageOffset: buildStorageOffset, } @@ -56,9 +56,9 @@ func CreateMapping( if blockLength > 0 { mappings = append(mappings, &BuildMap{ - Offset: uint64(startBlock) * metadata.BlockSize, 
+ Offset: uint64(startBlock) * uint64(blockSize), BuildId: *buildId, - Length: uint64(blockLength) * uint64(metadata.BlockSize), + Length: uint64(blockLength) * uint64(blockSize), BuildStorageOffset: buildStorageOffset, }) } @@ -234,3 +234,15 @@ func MergeMappings( return mappings } + +// NormalizeMappings joins adjacent mappings that have the same buildId. +func NormalizeMappings(mappings []*BuildMap) []*BuildMap { + for i := 0; i < len(mappings); i++ { + if i+1 < len(mappings) && mappings[i].BuildId == mappings[i+1].BuildId { + mappings[i].Length += mappings[i+1].Length + mappings = append(mappings[:i+1], mappings[i+2:]...) + } + } + + return mappings +} diff --git a/packages/shared/pkg/storage/header/mapping_test.go b/packages/shared/pkg/storage/header/mapping_test.go index a86e7a7..d9952ff 100644 --- a/packages/shared/pkg/storage/header/mapping_test.go +++ b/packages/shared/pkg/storage/header/mapping_test.go @@ -4,13 +4,14 @@ import ( "testing" "github.com/google/uuid" - "github.com/stretchr/testify/require" ) -var ignoreID = uuid.Nil -var baseID = uuid.New() -var diffID = uuid.New() +var ( + ignoreID = uuid.Nil + baseID = uuid.New() + diffID = uuid.New() +) var blockSize = uint64(2 << 20) diff --git a/packages/shared/pkg/storage/header/metadata.go b/packages/shared/pkg/storage/header/metadata.go new file mode 100644 index 0000000..f18ec99 --- /dev/null +++ b/packages/shared/pkg/storage/header/metadata.go @@ -0,0 +1,44 @@ +package header + +import ( + "context" + + "github.com/bits-and-blooms/bitset" + "github.com/google/uuid" + + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +var ignoreBuildID = uuid.Nil + +type DiffMetadata struct { + Dirty *bitset.BitSet + Empty *bitset.BitSet + + BlockSize int64 +} + +func (d *DiffMetadata) CreateMapping( + ctx context.Context, + buildID uuid.UUID, +) (mapping []*BuildMap, e error) { + dirtyMappings := CreateMapping( + &buildID, + d.Dirty, + d.BlockSize, + ) + telemetry.ReportEvent(ctx, "created dirty 
mapping") + + emptyMappings := CreateMapping( + // This buildID is intentionally ignored for nil blocks + &ignoreBuildID, + d.Empty, + d.BlockSize, + ) + telemetry.ReportEvent(ctx, "created empty mapping") + + mappings := MergeMappings(dirtyMappings, emptyMappings) + telemetry.ReportEvent(ctx, "merge mappings") + + return mappings, nil +} diff --git a/packages/shared/pkg/storage/header/serialization.go b/packages/shared/pkg/storage/header/serialization.go index 3a9edf3..3ae60ce 100644 --- a/packages/shared/pkg/storage/header/serialization.go +++ b/packages/shared/pkg/storage/header/serialization.go @@ -19,6 +19,27 @@ type Metadata struct { BaseBuildId uuid.UUID } +func NewTemplateMetadata(buildId uuid.UUID, blockSize, size uint64) *Metadata { + return &Metadata{ + Version: 1, + Generation: 0, + BlockSize: blockSize, + Size: size, + BuildId: buildId, + BaseBuildId: buildId, + } +} + +func (m *Metadata) NextGeneration(buildID uuid.UUID) *Metadata { + return &Metadata{ + Version: 1, + Generation: m.Generation + 1, + BlockSize: m.BlockSize, + Size: m.Size, + BuildId: buildID, + BaseBuildId: m.BaseBuildId, + } +} func Serialize(metadata *Metadata, mappings []*BuildMap) (io.Reader, error) { var buf bytes.Buffer diff --git a/packages/shared/pkg/storage/s3/bucket.go b/packages/shared/pkg/storage/s3/bucket.go deleted file mode 100644 index fdd7f6e..0000000 --- a/packages/shared/pkg/storage/s3/bucket.go +++ /dev/null @@ -1,33 +0,0 @@ -package s3 - -import ( - "context" - "sync" - - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/e2b-dev/infra/packages/shared/pkg/utils" -) - -type BucketHandle struct { - Name string - Client *s3.Client -} - -var getClient = sync.OnceValue(func() *s3.Client { - return utils.Must(newClient(context.Background())) -}) - -func newBucket(bucket string) *BucketHandle { - return &BucketHandle{ - Name: bucket, - Client: getClient(), - } -} - -func getTemplateBucketName() string { - return utils.RequiredEnv("TEMPLATE_BUCKET_NAME", "bucket for 
storing template files") -} - -func GetTemplateBucket() *BucketHandle { - return newBucket(getTemplateBucketName()) -} diff --git a/packages/shared/pkg/storage/s3/client.go b/packages/shared/pkg/storage/s3/client.go deleted file mode 100644 index d88a440..0000000 --- a/packages/shared/pkg/storage/s3/client.go +++ /dev/null @@ -1,39 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -// 客户端配置常量 -const ( - // 客户端连接超时时间 - clientConnectTimeout = 30 * time.Second - // 最大重试次数 - maxRetryAttempts = 3 -) - -// createS3Client 创建并配置一个新的S3客户端 -// 该函数加载AWS默认配置并创建S3服务客户端 -func newClient(ctx context.Context) (*s3.Client, error) { - // 创建带超时的上下文 - ctxWithTimeout, cancel := context.WithTimeout(ctx, clientConnectTimeout) - defer cancel() - - // 加载AWS配置 - configOptions := []func(*config.LoadOptions) error{ - config.WithRetryMaxAttempts(maxRetryAttempts), - } - - cfg, err := config.LoadDefaultConfig(ctxWithTimeout, configOptions...) 
- if err != nil { - return nil, fmt.Errorf("无法加载AWS配置: %w", err) - } - - // 创建并返回S3客户端 - return s3.NewFromConfig(cfg), nil -} diff --git a/packages/shared/pkg/storage/s3/dir.go b/packages/shared/pkg/storage/s3/dir.go deleted file mode 100644 index 209b07b..0000000 --- a/packages/shared/pkg/storage/s3/dir.go +++ /dev/null @@ -1,55 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go-v2/service/s3/types" -) - -// RemoveDir deletes all objects under the specified directory prefix in the bucket -func RemoveDir(ctx context.Context, bucket *BucketHandle, dirPath string) error { - // Create a paginator to list all objects with the directory prefix - objectLister := s3.NewListObjectsV2Paginator(bucket.Client, &s3.ListObjectsV2Input{ - Bucket: aws.String(bucket.Name), - Prefix: aws.String(dirPath + "/"), - }) - - // Process each page of results - for objectLister.HasMorePages() { - // Get the next page of objects - resultPage, err := objectLister.NextPage(ctx) - if err != nil { - return fmt.Errorf("failed to list objects in directory '%s': %w", dirPath, err) - } - - // If no objects found, we're done - if len(resultPage.Contents) == 0 { - break - } - - // Prepare object identifiers for batch deletion - objectsToDelete := make([]types.ObjectIdentifier, len(resultPage.Contents)) - for i, objectInfo := range resultPage.Contents { - objectsToDelete[i] = types.ObjectIdentifier{ - Key: objectInfo.Key, - } - } - - // Execute batch deletion - _, err = bucket.Client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ - Bucket: aws.String(bucket.Name), - Delete: &types.Delete{ - Objects: objectsToDelete, - }, - }) - - if err != nil { - return fmt.Errorf("failed to delete objects in directory '%s': %w", dirPath, err) - } - } - - return nil -} diff --git a/packages/shared/pkg/storage/s3/object.go b/packages/shared/pkg/storage/s3/object.go deleted file mode 100644 index 04ea02a..0000000 
--- a/packages/shared/pkg/storage/s3/object.go +++ /dev/null @@ -1,231 +0,0 @@ -package s3 - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/feature/s3/manager" - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -// 存储操作相关常量 -const ( - // 读取操作超时 - readTimeout = 10 * time.Second - // 一般操作超时 - operationTimeout = 5 * time.Second - // 缓冲区大小 - bufferSize = 2 << 21 - // 初始重试等待时间 - initialBackoff = 10 * time.Millisecond - // 最大重试等待时间 - maxBackoff = 10 * time.Second - // 重试等待时间倍数 - backoffMultiplier = 2 - // 最大重试次数 - maxAttempts = 10 -) - -// 存储操作接口定义 -type StorageOperations interface { - WriteTo(dst io.Writer) (int64, error) - ReadFrom(src io.Reader) (int64, error) - ReadAt(b []byte, off int64) (n int, err error) - Size() (int64, error) - Delete() error -} - -// Object 表示S3存储桶中的一个对象 -type Object struct { - bucket *BucketHandle - key string - ctx context.Context -} - -// 确保Object实现了StorageOperations接口 -var _ StorageOperations = (*Object)(nil) - -// 对象管理相关函数 - -// NewObject 创建一个新的S3对象引用 -func NewObject(ctx context.Context, bucket *BucketHandle, objectPath string) *Object { - return &Object{ - bucket: bucket, - key: objectPath, - ctx: ctx, - } -} - -// Delete 删除S3对象 -func (o *Object) Delete() error { - ctx, cancel := context.WithTimeout(o.ctx, operationTimeout) - defer cancel() - - _, err := o.bucket.Client.DeleteObject(ctx, &s3.DeleteObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - }) - - if err != nil { - return fmt.Errorf("删除S3对象失败: %w", err) - } - - return nil -} - -// Size 获取S3对象的大小 -func (o *Object) Size() (int64, error) { - ctx, cancel := context.WithTimeout(o.ctx, operationTimeout) - defer cancel() - - resp, err := o.bucket.Client.HeadObject(ctx, &s3.HeadObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - }) - - if err != nil { - return 0, fmt.Errorf("获取S3对象(%s)属性失败: %w", o.key, err) - } - - return 
*resp.ContentLength, nil -} - -// 数据读写相关函数 - -// WriteTo 将S3对象内容写入目标写入器 -func (o *Object) WriteTo(dst io.Writer) (int64, error) { - ctx, cancel := context.WithTimeout(o.ctx, readTimeout) - defer cancel() - - resp, err := o.bucket.Client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - }) - if err != nil { - return 0, fmt.Errorf("下载S3对象失败: %w", err) - } - defer resp.Body.Close() - - return io.Copy(dst, resp.Body) -} - -// ReadFrom 从源读取器读取内容并上传到S3对象 -func (o *Object) ReadFrom(src io.Reader) (int64, error) { - uploader := manager.NewUploader(o.bucket.Client) - - _, err := uploader.Upload(o.ctx, &s3.PutObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - Body: src, - }) - - if err != nil { - return 0, fmt.Errorf("上传到S3失败: %w", err) - } - - // S3 API不返回写入的字节数,所以这里返回0 - return 0, nil -} - -// ReadAt 从S3对象的指定偏移量读取数据 -func (o *Object) ReadAt(b []byte, off int64) (n int, err error) { - ctx, cancel := context.WithTimeout(o.ctx, readTimeout) - defer cancel() - - resp, err := o.bucket.Client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - Range: aws.String(fmt.Sprintf("bytes=%d-%d", off, off+int64(len(b))-1)), - }) - - if err != nil { - return 0, fmt.Errorf("创建S3读取器失败: %w", err) - } - - defer resp.Body.Close() - - return readAllFromResponse(resp.Body, b) -} - -// 辅助函数 - -// readAllFromResponse 从响应体读取数据到缓冲区 -func readAllFromResponse(body io.ReadCloser, buffer []byte) (int, error) { - var totalRead int - - for { - nr, readErr := body.Read(buffer[totalRead:]) - totalRead += nr - - if readErr == nil { - continue - } - - if errors.Is(readErr, io.EOF) { - break - } - - return totalRead, fmt.Errorf("从响应体读取失败: %w", readErr) - } - - return totalRead, nil -} - -// 文件上传相关函数 - -// UploadWithCli 使用AWS CLI上传文件到S3 -func (o *Object) UploadWithCli(ctx context.Context, path string) error { - cmd := exec.CommandContext( - ctx, - "aws", - "s3", - "cp", - path, - 
fmt.Sprintf("s3://%s/%s", o.bucket.Name, o.key), - ) - - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("使用CLI上传文件到S3失败: %w\n%s", err, string(output)) - } - - return nil -} - -// Upload 上传本地文件到S3对象 -func (o *Object) Upload(ctx context.Context, path string) error { - return o.uploadFileWithMultipart(ctx, path) -} - -// uploadFileWithMultipart 使用分块上传方式上传文件 -func (o *Object) uploadFileWithMultipart(ctx context.Context, path string) error { - // 打开本地文件 - file, err := os.Open(path) - if err != nil { - return fmt.Errorf("打开文件失败: %w", err) - } - defer file.Close() - - // 创建上传管理器并配置分块大小 - uploader := manager.NewUploader(o.bucket.Client, func(u *manager.Uploader) { - u.PartSize = 100 * 1024 * 1024 // 100MB per part - }) - - // 执行上传 - _, err = uploader.Upload(ctx, &s3.PutObjectInput{ - Bucket: aws.String(o.bucket.Name), - Key: aws.String(o.key), - Body: file, - }) - if err != nil { - return fmt.Errorf("上传失败: %w", err) - } - - return nil -} diff --git a/packages/shared/pkg/storage/s3/object_test.go b/packages/shared/pkg/storage/s3/object_test.go deleted file mode 100644 index 04b9fd7..0000000 --- a/packages/shared/pkg/storage/s3/object_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package s3 - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// 测试环境配置 -type testConfig struct { - bucketName string - region string - filePath string - timeout time.Duration -} - -// 从环境变量获取测试配置 -func getTestConfig() testConfig { - return testConfig{ - bucketName: os.Getenv("TEMPLATE_BUCKET_NAME"), - region: getEnvWithDefault("AWS_REGION", "us-east-1"), - filePath: getEnvWithDefault("LARGE_FILE_PATH", "object.go"), - timeout: 30 * time.Second, - } -} - -// 获取环境变量,如果不存在则使用默认值 -func getEnvWithDefault(key, defaultValue string) string { - value := os.Getenv(key) - if value == "" { - return defaultValue 
- } - return value -} - -// 创建测试用的S3客户端 -func createTestS3Client(t *testing.T, region string) *s3.Client { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - cfg, err := config.LoadDefaultConfig(ctx, - config.WithRegion(region)) - require.NoError(t, err, "加载AWS配置应该成功") - - return s3.NewFromConfig(cfg) -} - -// TestObject_WithRealS3Client 使用真实S3客户端测试对象操作 -func TestObject_WithRealS3Client(t *testing.T) { - // 获取测试配置 - cfg := getTestConfig() - - // 检查必要的环境变量 - if cfg.bucketName == "" { - t.Fatal("未设置TEMPLATE_BUCKET_NAME环境变量") - return - } - - // 记录测试配置 - t.Logf("测试配置: 存储桶=%s, 区域=%s, 文件=%s", - cfg.bucketName, cfg.region, cfg.filePath) - - // 创建测试上下文 - ctx, cancel := context.WithTimeout(context.Background(), cfg.timeout) - defer cancel() - - // 创建S3客户端 - client := createTestS3Client(t, cfg.region) - - // 创建存储桶处理器 - bucket := &BucketHandle{ - Name: cfg.bucketName, - Client: client, - } - - // 运行测试用例 - t.Run("上传和删除对象", func(t *testing.T) { - testUploadAndDelete(t, ctx, bucket, cfg.filePath) - }) -} - -// 测试上传和删除功能 -func testUploadAndDelete(t *testing.T, ctx context.Context, bucket *BucketHandle, filePath string) { - // 创建测试对象 - obj := NewObject(ctx, bucket, filePath) - - // 测试上传功能 - err := obj.Upload(ctx, filePath) - assert.NoError(t, err, "上传对象应该成功") - t.Logf("成功上传对象 %s", filePath) - - // 验证对象存在并获取大小 - size, err := obj.Size() - assert.NoError(t, err, "获取对象大小应该成功") - t.Logf("对象大小: %d 字节", size) - - // 测试删除功能 - err = obj.Delete() - assert.NoError(t, err, "删除对象应该成功") - t.Logf("成功删除对象 %s", filePath) - - // 验证对象已被删除 - _, err = obj.Size() - assert.Error(t, err, "对象应该已被删除") - t.Logf("确认对象已被删除") -} diff --git a/packages/shared/pkg/storage/storage.go b/packages/shared/pkg/storage/storage.go new file mode 100644 index 0000000..bd99445 --- /dev/null +++ b/packages/shared/pkg/storage/storage.go @@ -0,0 +1,63 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/e2b-dev/infra/packages/shared/pkg/env" + 
"github.com/e2b-dev/infra/packages/shared/pkg/utils" +) + +var ErrorObjectNotExist = errors.New("object does not exist") + +type Provider string + +const ( + GCPStorageProvider Provider = "GCPBucket" + AWSStorageProvider Provider = "AWSBucket" + LocalStorageProvider Provider = "Local" + + DefaultStorageProvider Provider = GCPStorageProvider + + storageProviderEnv = "STORAGE_PROVIDER" +) + +type StorageProvider interface { + DeleteObjectsWithPrefix(ctx context.Context, prefix string) error + OpenObject(ctx context.Context, path string) (StorageObjectProvider, error) + GetDetails() string +} + +type StorageObjectProvider interface { + WriteTo(dst io.Writer) (int64, error) + WriteFromFileSystem(path string) error + + ReadFrom(src io.Reader) (int64, error) + ReadAt(buff []byte, off int64) (n int, err error) + + Size() (int64, error) + Delete() error +} + +func GetTemplateStorageProvider(ctx context.Context) (StorageProvider, error) { + provider := Provider(env.GetEnv(storageProviderEnv, string(DefaultStorageProvider))) + + if provider == LocalStorageProvider { + basePath := env.GetEnv("LOCAL_TEMPLATE_STORAGE_BASE_PATH", "/tmp/templates") + return NewFileSystemStorageProvider(basePath) + } + + bucketName := utils.RequiredEnv("TEMPLATE_BUCKET_NAME", "Bucket for storing template files") + + // cloud bucket-based storage + switch provider { + case AWSStorageProvider: + return NewAWSBucketStorageProvider(ctx, bucketName) + case GCPStorageProvider: + return NewGCPBucketStorageProvider(ctx, bucketName) + } + + return nil, fmt.Errorf("unknown storage provider: %s", provider) +} diff --git a/packages/shared/pkg/storage/storage_aws.go b/packages/shared/pkg/storage/storage_aws.go new file mode 100644 index 0000000..d18dc66 --- /dev/null +++ b/packages/shared/pkg/storage/storage_aws.go @@ -0,0 +1,199 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + 
"github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" +) + +const ( + awsOperationTimeout = 5 * time.Second + awsWriteTimeout = 30 * time.Second + awsReadTimeout = 15 * time.Second +) + +type AWSBucketStorageProvider struct { + client *s3.Client + bucketName string +} + +type AWSBucketStorageObjectProvider struct { + client *s3.Client + path string + bucketName string + ctx context.Context +} + +func NewAWSBucketStorageProvider(ctx context.Context, bucketName string) (*AWSBucketStorageProvider, error) { + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return nil, err + } + + client := s3.NewFromConfig(cfg) + + return &AWSBucketStorageProvider{ + client: client, + bucketName: bucketName, + }, nil +} + +func (a *AWSBucketStorageProvider) DeleteObjectsWithPrefix(ctx context.Context, prefix string) error { + ctx, cancel := context.WithTimeout(ctx, awsOperationTimeout) + defer cancel() + + list, err := a.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{Bucket: &a.bucketName, Prefix: &prefix}) + if err != nil { + return err + } + + objects := make([]types.ObjectIdentifier, 0, len(list.Contents)) + for _, obj := range list.Contents { + objects = append(objects, types.ObjectIdentifier{Key: obj.Key}) + } + + _, err = a.client.DeleteObjects( + ctx, &s3.DeleteObjectsInput{ + Bucket: &a.bucketName, + Delete: &types.Delete{Objects: objects}, + }, + ) + + return err +} + +func (a *AWSBucketStorageProvider) GetDetails() string { + return fmt.Sprintf("[AWS Storage, bucket set to %s]", a.bucketName) +} + +func (a *AWSBucketStorageProvider) OpenObject(ctx context.Context, path string) (StorageObjectProvider, error) { + return &AWSBucketStorageObjectProvider{ + client: a.client, + bucketName: a.bucketName, + path: path, + ctx: ctx, + }, nil +} + +func (a *AWSBucketStorageObjectProvider) WriteTo(dst io.Writer) (int64, error) { + ctx, cancel := context.WithTimeout(a.ctx, 
awsReadTimeout) + defer cancel() + + resp, err := a.client.GetObject(ctx, &s3.GetObjectInput{Bucket: &a.bucketName, Key: &a.path}) + if err != nil { + var nsk *types.NoSuchKey + if errors.As(err, &nsk) { + return 0, ErrorObjectNotExist + } + + return 0, err + } + + defer resp.Body.Close() + + return io.Copy(dst, resp.Body) +} + +func (a *AWSBucketStorageObjectProvider) WriteFromFileSystem(path string) error { + ctx, cancel := context.WithTimeout(a.ctx, awsWriteTimeout) + defer cancel() + + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + + uploader := manager.NewUploader( + a.client, + func(u *manager.Uploader) { + u.PartSize = 10 * 1024 * 1024 // 10 MB + u.Concurrency = 8 // eight parts in flight + }, + ) + + _, err = uploader.Upload( + ctx, + &s3.PutObjectInput{ + Bucket: &a.bucketName, + Key: &a.path, + Body: file, + }, + ) + + return err +} + +func (a *AWSBucketStorageObjectProvider) ReadFrom(src io.Reader) (int64, error) { + ctx, cancel := context.WithTimeout(a.ctx, awsWriteTimeout) + defer cancel() + + _, err := a.client.PutObject( + ctx, + &s3.PutObjectInput{ + Bucket: &a.bucketName, + Key: &a.path, + Body: src, + }, + ) + if err != nil { + return 0, err + } + + return 0, nil +} + +func (a *AWSBucketStorageObjectProvider) ReadAt(buff []byte, off int64) (n int, err error) { + ctx, cancel := context.WithTimeout(a.ctx, awsReadTimeout) + defer cancel() + + readRange := aws.String(fmt.Sprintf("bytes=%d-%d", off, off+int64(len(buff))-1)) + resp, err := a.client.GetObject(ctx, &s3.GetObjectInput{Bucket: &a.bucketName, Key: &a.path, Range: readRange}) + if err != nil { + var nsk *types.NoSuchKey + if errors.As(err, &nsk) { + return 0, ErrorObjectNotExist + } + + return 0, err + } + + defer resp.Body.Close() + + return io.ReadFull(resp.Body, buff) +} + +func (a *AWSBucketStorageObjectProvider) Size() (int64, error) { + ctx, cancel := context.WithTimeout(a.ctx, awsOperationTimeout) + defer cancel() + + resp, err := 
a.client.HeadObject(ctx, &s3.HeadObjectInput{Bucket: &a.bucketName, Key: &a.path}) + if err != nil { + return 0, err + } + + return *resp.ContentLength, nil +} + +func (a *AWSBucketStorageObjectProvider) Delete() error { + ctx, cancel := context.WithTimeout(a.ctx, awsOperationTimeout) + defer cancel() + + _, err := a.client.DeleteObject( + ctx, &s3.DeleteObjectInput{ + Bucket: &a.bucketName, + Key: &a.path, + }, + ) + + return err +} diff --git a/packages/shared/pkg/storage/storage_fs.go b/packages/shared/pkg/storage/storage_fs.go new file mode 100644 index 0000000..70144c5 --- /dev/null +++ b/packages/shared/pkg/storage/storage_fs.go @@ -0,0 +1,149 @@ +package storage + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" +) + +type FileSystemStorageProvider struct { + basePath string + opened map[string]*os.File + + StorageProvider +} + +type FileSystemStorageObjectProvider struct { + path string + ctx context.Context +} + +func NewFileSystemStorageProvider(basePath string) (*FileSystemStorageProvider, error) { + return &FileSystemStorageProvider{ + basePath: basePath, + opened: make(map[string]*os.File), + }, nil +} + +func (fs *FileSystemStorageProvider) DeleteObjectsWithPrefix(_ context.Context, prefix string) error { + filePath := fs.getPath(prefix) + return os.RemoveAll(filePath) +} + +func (fs *FileSystemStorageProvider) GetDetails() string { + return fmt.Sprintf("[Local file storage, base path set to %s]", fs.basePath) +} + +func (fs *FileSystemStorageProvider) OpenObject(ctx context.Context, path string) (StorageObjectProvider, error) { + dir := filepath.Dir(fs.getPath(path)) + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, err + } + + return &FileSystemStorageObjectProvider{ + path: fs.getPath(path), + ctx: ctx, + }, nil +} + +func (fs *FileSystemStorageProvider) getPath(path string) string { + return filepath.Join(fs.basePath, path) +} + +func (f *FileSystemStorageObjectProvider) WriteTo(dst io.Writer) (int64, error) { + handle, 
err := f.getHandle(true) + if err != nil { + return 0, err + } + + defer handle.Close() + + return io.Copy(dst, handle) +} + +func (f *FileSystemStorageObjectProvider) WriteFromFileSystem(path string) error { + handle, err := f.getHandle(false) + if err != nil { + return err + } + defer handle.Close() + + src, err := os.Open(path) + if err != nil { + return err + } + defer src.Close() + + _, err = io.Copy(handle, src) + if err != nil { + return err + } + + return nil +} + +func (f *FileSystemStorageObjectProvider) ReadFrom(src io.Reader) (int64, error) { + handle, err := f.getHandle(false) + if err != nil { + return 0, err + } + defer handle.Close() + + return io.Copy(handle, src) +} + +func (f *FileSystemStorageObjectProvider) ReadAt(buff []byte, off int64) (n int, err error) { + handle, err := f.getHandle(true) + if err != nil { + return 0, err + } + defer handle.Close() + + return handle.ReadAt(buff, off) +} + +func (f *FileSystemStorageObjectProvider) Size() (int64, error) { + handle, err := f.getHandle(true) + if err != nil { + return 0, err + } + defer handle.Close() + + fileInfo, err := handle.Stat() + if err != nil { + return 0, err + } + + return fileInfo.Size(), nil +} + +func (f *FileSystemStorageObjectProvider) Delete() error { + return os.Remove(f.path) +} + +func (f *FileSystemStorageObjectProvider) getHandle(checkExistence bool) (*os.File, error) { + if checkExistence { + info, err := os.Stat(f.path) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrorObjectNotExist + } + + return nil, err + } + + if info.IsDir() { + return nil, fmt.Errorf("path %s is a directory", f.path) + } + + } + + handle, err := os.OpenFile(f.path, os.O_RDWR|os.O_CREATE, 0o644) + if err != nil { + return nil, err + } + + return handle, nil +} diff --git a/packages/shared/pkg/storage/storage_fs_test.go b/packages/shared/pkg/storage/storage_fs_test.go new file mode 100644 index 0000000..21185b7 --- /dev/null +++ b/packages/shared/pkg/storage/storage_fs_test.go @@ -0,0 
+1,128 @@ +package storage + +import ( + "bytes" + "context" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// helper to create a FileSystemStorageProvider rooted in a temp directory. +func newTempProvider(t *testing.T) *FileSystemStorageProvider { + t.Helper() + + base := t.TempDir() + p, err := NewFileSystemStorageProvider(base) + require.NoError(t, err) + return p +} + +func TestOpenObject_ReadWrite_Size_ReadAt(t *testing.T) { + p := newTempProvider(t) + ctx := context.Background() + + obj, err := p.OpenObject(ctx, filepath.Join("sub", "file.txt")) + require.NoError(t, err) + + const contents = "hello world" + // write via ReadFrom + n, err := obj.ReadFrom(strings.NewReader(contents)) + require.NoError(t, err) + require.Equal(t, int64(len(contents)), n) + + // check Size + size, err := obj.Size() + require.NoError(t, err) + require.Equal(t, int64(len(contents)), size) + + // read the entire file back via WriteTo + var buf bytes.Buffer + n, err = obj.WriteTo(&buf) + require.NoError(t, err) + require.Equal(t, int64(len(contents)), n) + require.Equal(t, contents, buf.String()) + + // read a slice via ReadAt ("world") + part := make([]byte, 5) + nRead, err := obj.ReadAt(part, 6) + require.NoError(t, err) + require.Equal(t, 5, nRead) + require.Equal(t, "world", string(part)) +} + +func TestWriteFromFileSystem(t *testing.T) { + p := newTempProvider(t) + ctx := context.Background() + + // create a separate source file on disk + srcPath := filepath.Join(t.TempDir(), "src.txt") + const payload = "copy me please" + require.NoError(t, os.WriteFile(srcPath, []byte(payload), 0o600)) + + obj, err := p.OpenObject(ctx, "copy/dst.txt") + require.NoError(t, err) + require.NoError(t, obj.WriteFromFileSystem(srcPath)) + + var buf bytes.Buffer + _, err = obj.WriteTo(&buf) + require.NoError(t, err) + require.Equal(t, payload, buf.String()) +} + +func TestDelete(t *testing.T) { + p := newTempProvider(t) + ctx := context.Background() + 
+ obj, err := p.OpenObject(ctx, "to/delete.txt") + require.NoError(t, err) + + _, err = obj.ReadFrom(strings.NewReader("bye")) + require.NoError(t, err) + require.NoError(t, obj.Delete()) + + // subsequent Size call should fail with ErrorObjectNotExist + _, err = obj.Size() + require.ErrorIs(t, err, ErrorObjectNotExist) +} + +func TestDeleteObjectsWithPrefix(t *testing.T) { + p := newTempProvider(t) + ctx := context.Background() + + paths := []string{ + "data/a.txt", + "data/b.txt", + "data/sub/c.txt", + } + for _, pth := range paths { + obj, err := p.OpenObject(ctx, pth) + require.NoError(t, err) + _, err = obj.ReadFrom(strings.NewReader("x")) + require.NoError(t, err) + } + + // remove the entire "data" prefix + require.NoError(t, p.DeleteObjectsWithPrefix(ctx, "data")) + + for _, pth := range paths { + full := filepath.Join(p.GetDetails()[len("[Local file storage, base path set to "):len(p.GetDetails())-1], pth) // derive basePath + _, err := os.Stat(full) + require.True(t, os.IsNotExist(err)) + } +} + +func TestWriteToNonExistentObject(t *testing.T) { + p := newTempProvider(t) + + ctx := context.Background() + obj, err := p.OpenObject(ctx, "missing/file.txt") + require.NoError(t, err) + + var sink bytes.Buffer + _, err = obj.WriteTo(&sink) + require.ErrorIs(t, err, ErrorObjectNotExist) +} diff --git a/packages/shared/pkg/storage/storage_google.go b/packages/shared/pkg/storage/storage_google.go new file mode 100644 index 0000000..ff9963f --- /dev/null +++ b/packages/shared/pkg/storage/storage_google.go @@ -0,0 +1,204 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "io" + "os/exec" + "time" + + "cloud.google.com/go/storage" + "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" +) + +const ( + googleReadTimeout = 10 * time.Second + googleOperationTimeout = 5 * time.Second + googleBufferSize = 2 << 21 + googleInitialBackoff = 10 * time.Millisecond + googleMaxBackoff = 10 * time.Second + googleBackoffMultiplier = 2 + 
googleMaxAttempts = 10 +) + +type GCPBucketStorageProvider struct { + client *storage.Client + bucket *storage.BucketHandle +} + +type GCPBucketStorageObjectProvider struct { + storage *GCPBucketStorageProvider + path string + handle *storage.ObjectHandle + ctx context.Context +} + +func NewGCPBucketStorageProvider(ctx context.Context, bucketName string) (*GCPBucketStorageProvider, error) { + client, err := storage.NewClient(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create GCS client: %w", err) + } + + return &GCPBucketStorageProvider{ + client: client, + bucket: client.Bucket(bucketName), + }, nil +} + +func (g *GCPBucketStorageProvider) DeleteObjectsWithPrefix(ctx context.Context, prefix string) error { + objects := g.bucket.Objects(ctx, &storage.Query{Prefix: prefix + "/"}) + + for { + object, err := objects.Next() + if errors.Is(err, iterator.Done) { + break + } + + if err != nil { + return fmt.Errorf("error when iterating over template objects: %w", err) + } + + err = g.bucket.Object(object.Name).Delete(ctx) + if err != nil { + return fmt.Errorf("error when deleting template object: %w", err) + } + } + + return nil +} + +func (g *GCPBucketStorageProvider) GetDetails() string { + return fmt.Sprintf("[GCP Storage, bucket set to %s]", g.bucket.BucketName()) +} + +func (g *GCPBucketStorageProvider) OpenObject(ctx context.Context, path string) (StorageObjectProvider, error) { + handle := g.bucket.Object(path).Retryer( + storage.WithMaxAttempts(googleMaxAttempts), + storage.WithPolicy(storage.RetryAlways), + storage.WithBackoff( + gax.Backoff{ + Initial: googleInitialBackoff, + Max: googleMaxBackoff, + Multiplier: googleBackoffMultiplier, + }, + ), + ) + + return &GCPBucketStorageObjectProvider{ + storage: g, + path: path, + handle: handle, + ctx: ctx, + }, nil +} + +func (g *GCPBucketStorageObjectProvider) Delete() error { + ctx, cancel := context.WithTimeout(g.ctx, googleOperationTimeout) + defer cancel() + + return g.handle.Delete(ctx) +} + +func 
(g *GCPBucketStorageObjectProvider) Size() (int64, error) { + ctx, cancel := context.WithTimeout(g.ctx, googleOperationTimeout) + defer cancel() + + attrs, err := g.handle.Attrs(ctx) + if err != nil { + return 0, fmt.Errorf("failed to get GCS object (%s) attributes: %w", g.path, err) + } + + return attrs.Size, nil +} + +func (g *GCPBucketStorageObjectProvider) ReadAt(buff []byte, off int64) (n int, err error) { + ctx, cancel := context.WithTimeout(g.ctx, googleReadTimeout) + defer cancel() + + // The file should not be gzip compressed + reader, err := g.handle.NewRangeReader(ctx, off, int64(len(buff))) + if err != nil { + return 0, fmt.Errorf("failed to create GCS reader: %w", err) + } + + defer reader.Close() + + for reader.Remain() > 0 { + nr, readErr := reader.Read(buff[n:]) + n += nr + + if readErr == nil { + continue + } + + if errors.Is(readErr, io.EOF) { + break + } + + return n, fmt.Errorf("failed to read from GCS object: %w", readErr) + } + + return n, nil +} + +func (g *GCPBucketStorageObjectProvider) ReadFrom(src io.Reader) (int64, error) { + w := g.handle.NewWriter(g.ctx) + + n, err := io.Copy(w, src) + if err != nil && !errors.Is(err, io.EOF) { + return n, fmt.Errorf("failed to copy buffer to persistence: %w", err) + } + + err = w.Close() + if err != nil { + return n, fmt.Errorf("failed to close GCS writer: %w", err) + } + + return n, nil +} + +func (g *GCPBucketStorageObjectProvider) WriteTo(dst io.Writer) (int64, error) { + ctx, cancel := context.WithTimeout(g.ctx, googleReadTimeout) + defer cancel() + + reader, err := g.handle.NewReader(ctx) + if err != nil { + if errors.Is(err, storage.ErrObjectNotExist) { + return 0, ErrorObjectNotExist + } + + return 0, err + } + + defer reader.Close() + + buff := make([]byte, googleBufferSize) + n, err := io.CopyBuffer(dst, reader, buff) + if err != nil { + return n, fmt.Errorf("failed to copy GCS object to writer: %w", err) + } + + return n, nil +} + +func (g *GCPBucketStorageObjectProvider) 
WriteFromFileSystem(path string) error { + cmd := exec.CommandContext( + g.ctx, + "gcloud", + "storage", + "cp", + "--verbosity", + "error", + path, + fmt.Sprintf("gs://%s/%s", g.storage.bucket.BucketName(), g.path), + ) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to upload file to GCS: %w\n%s", err, string(output)) + } + + return nil +} diff --git a/packages/shared/pkg/storage/template.go b/packages/shared/pkg/storage/template.go index 64afada..e903095 100644 --- a/packages/shared/pkg/storage/template.go +++ b/packages/shared/pkg/storage/template.go @@ -3,8 +3,6 @@ package storage import ( "fmt" "path/filepath" - - "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" ) const ( @@ -14,13 +12,8 @@ const ( KernelMountDir = "/fc-vm" KernelName = "vmlinux.bin" - HostOldEnvdPath = "/fc-envd/envd-v0.0.1" - HostEnvdPath = "/fc-envd/envd" - GuestOldEnvdPath = "/usr/bin/envd-v0.0.1" - GuestEnvdPath = "/usr/bin/envd" - - EnvdVersionKey = "envd_version" - RootfsSizeKey = "rootfs_size" + HostEnvdPath = "/fc-envd/envd" + GuestEnvdPath = "/usr/bin/envd" FirecrackerVersionsDir = "/fc-versions" FirecrackerBinaryName = "firecracker" @@ -34,16 +27,11 @@ const ( HeaderSuffix = ".header" ) -// Path to the directory where the kernel can be accessed inside when the dirs are mounted. 
-var KernelMountedPath = filepath.Join(KernelMountDir, KernelName) - type TemplateFiles struct { TemplateId string BuildId string KernelVersion string FirecrackerVersion string - - hugePages bool } func NewTemplateFiles( @@ -51,14 +39,12 @@ func NewTemplateFiles( buildId, kernelVersion, firecrackerVersion string, - hugePages bool, ) *TemplateFiles { return &TemplateFiles{ TemplateId: templateId, BuildId: buildId, KernelVersion: kernelVersion, FirecrackerVersion: firecrackerVersion, - hugePages: hugePages, } } @@ -111,34 +97,10 @@ func (t *TemplateFiles) StorageSnapfilePath() string { return fmt.Sprintf("%s/%s", t.StorageDir(), SnapfileName) } -func (t *TemplateFiles) BuildDir() string { +func (t *TemplateFiles) SandboxBuildDir() string { return filepath.Join(EnvsDisk, t.TemplateId, buildDirName, t.BuildId) } -func (t *TemplateFiles) BuildMemfilePath() string { - return filepath.Join(t.BuildDir(), MemfileName) -} - -func (t *TemplateFiles) BuildRootfsPath() string { - return filepath.Join(t.BuildDir(), RootfsName) -} - -func (t *TemplateFiles) BuildSnapfilePath() string { - return filepath.Join(t.BuildDir(), SnapfileName) -} - -func (t *TemplateFiles) Hugepages() bool { - return t.hugePages -} - -func (t *TemplateFiles) MemfilePageSize() int64 { - if t.hugePages { - return header.HugepageSize - } - - return header.PageSize -} - -func (t *TemplateFiles) RootfsBlockSize() int64 { - return header.RootfsBlockSize +func (t *TemplateFiles) SandboxRootfsPath() string { + return filepath.Join(t.SandboxBuildDir(), RootfsName) } diff --git a/packages/shared/pkg/storage/template_build.go b/packages/shared/pkg/storage/template_build.go index 7ef2b69..301b7e7 100644 --- a/packages/shared/pkg/storage/template_build.go +++ b/packages/shared/pkg/storage/template_build.go @@ -8,34 +8,29 @@ import ( "golang.org/x/sync/errgroup" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/header" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" + headers 
"github.com/e2b-dev/infra/packages/shared/pkg/storage/header" ) type TemplateBuild struct { - files *TemplateFiles + files *TemplateFiles + persistence StorageProvider - memfileHeader *header.Header - rootfsHeader *header.Header - - bucket *s3.BucketHandle + memfileHeader *headers.Header + rootfsHeader *headers.Header } -func NewTemplateBuild( - memfileHeader *header.Header, - rootfsHeader *header.Header, - files *TemplateFiles, -) *TemplateBuild { +func NewTemplateBuild(memfileHeader *headers.Header, rootfsHeader *headers.Header, persistence StorageProvider, files *TemplateFiles) *TemplateBuild { return &TemplateBuild{ - bucket: s3.GetTemplateBucket(), + persistence: persistence, + files: files, + memfileHeader: memfileHeader, rootfsHeader: rootfsHeader, - files: files, } } func (t *TemplateBuild) Remove(ctx context.Context) error { - err := s3.RemoveDir(ctx, t.bucket, t.files.StorageDir()) + err := t.persistence.DeleteObjectsWithPrefix(ctx, t.files.StorageDir()) if err != nil { return fmt.Errorf("error when removing template build '%s': %w", t.files.StorageDir(), err) } @@ -43,10 +38,13 @@ func (t *TemplateBuild) Remove(ctx context.Context) error { return nil } -func (t *TemplateBuild) uploadMemfileHeader(ctx context.Context, h *header.Header) error { - object := s3.NewObject(ctx, t.bucket, t.files.StorageMemfileHeaderPath()) +func (t *TemplateBuild) uploadMemfileHeader(ctx context.Context, h *headers.Header) error { + object, err := t.persistence.OpenObject(ctx, t.files.StorageMemfileHeaderPath()) + if err != nil { + return err + } - serialized, err := header.Serialize(h.Metadata, h.Mapping) + serialized, err := headers.Serialize(h.Metadata, h.Mapping) if err != nil { return fmt.Errorf("error when serializing memfile header: %w", err) } @@ -60,10 +58,12 @@ func (t *TemplateBuild) uploadMemfileHeader(ctx context.Context, h *header.Heade } func (t *TemplateBuild) uploadMemfile(ctx context.Context, memfilePath string) error { - object := s3.NewObject(ctx, t.bucket, 
t.files.StorageMemfilePath()) + object, err := t.persistence.OpenObject(ctx, t.files.StorageMemfilePath()) + if err != nil { + return err + } - err := object.Upload(ctx, memfilePath) - //err := object.UploadWithCli(ctx, memfilePath) + err = object.WriteFromFileSystem(memfilePath) if err != nil { return fmt.Errorf("error when uploading memfile: %w", err) } @@ -71,10 +71,13 @@ func (t *TemplateBuild) uploadMemfile(ctx context.Context, memfilePath string) e return nil } -func (t *TemplateBuild) uploadRootfsHeader(ctx context.Context, h *header.Header) error { - object := s3.NewObject(ctx, t.bucket, t.files.StorageRootfsHeaderPath()) +func (t *TemplateBuild) uploadRootfsHeader(ctx context.Context, h *headers.Header) error { + object, err := t.persistence.OpenObject(ctx, t.files.StorageRootfsHeaderPath()) + if err != nil { + return err + } - serialized, err := header.Serialize(h.Metadata, h.Mapping) + serialized, err := headers.Serialize(h.Metadata, h.Mapping) if err != nil { return fmt.Errorf("error when serializing memfile header: %w", err) } @@ -88,10 +91,12 @@ func (t *TemplateBuild) uploadRootfsHeader(ctx context.Context, h *header.Header } func (t *TemplateBuild) uploadRootfs(ctx context.Context, rootfsPath string) error { - object := s3.NewObject(ctx, t.bucket, t.files.StorageRootfsPath()) + object, err := t.persistence.OpenObject(ctx, t.files.StorageRootfsPath()) + if err != nil { + return err + } - err := object.Upload(ctx, rootfsPath) - //err := object.UploadWithCli(ctx, rootfsPath) + err = object.WriteFromFileSystem(rootfsPath) if err != nil { return fmt.Errorf("error when uploading rootfs: %w", err) } @@ -99,9 +104,12 @@ func (t *TemplateBuild) uploadRootfs(ctx context.Context, rootfsPath string) err return nil } -// Snapfile is small enough so we dont use composite upload. +// Snap-file is small enough so we don't use composite upload. 
func (t *TemplateBuild) uploadSnapfile(ctx context.Context, snapfile io.Reader) error { - object := s3.NewObject(ctx, t.bucket, t.files.StorageSnapfilePath()) + object, err := t.persistence.OpenObject(ctx, t.files.StorageSnapfilePath()) + if err != nil { + return err + } n, err := object.ReadFrom(snapfile) if err != nil { @@ -111,12 +119,7 @@ func (t *TemplateBuild) uploadSnapfile(ctx context.Context, snapfile io.Reader) return nil } -func (t *TemplateBuild) Upload( - ctx context.Context, - snapfilePath string, - memfilePath *string, - rootfsPath *string, -) chan error { +func (t *TemplateBuild) Upload(ctx context.Context, snapfilePath string, memfilePath *string, rootfsPath *string) chan error { eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { diff --git a/packages/shared/pkg/storage/template_cache.go b/packages/shared/pkg/storage/template_cache.go index 34f220f..6e52161 100644 --- a/packages/shared/pkg/storage/template_cache.go +++ b/packages/shared/pkg/storage/template_cache.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "os" "path/filepath" "github.com/google/uuid" @@ -9,7 +10,6 @@ import ( const ( templateCacheDir = "/orchestrator/template" - snapshotCacheDir = "/mnt/snapshot-cache" ) type TemplateCacheFiles struct { @@ -18,28 +18,29 @@ type TemplateCacheFiles struct { CacheIdentifier string } -func (f *TemplateFiles) NewTemplateCacheFiles() (*TemplateCacheFiles, error) { +func (t *TemplateFiles) NewTemplateCacheFiles() (*TemplateCacheFiles, error) { identifier, err := uuid.NewRandom() if err != nil { return nil, fmt.Errorf("failed to generate identifier: %w", err) } - return &TemplateCacheFiles{ - TemplateFiles: f, + tcf := &TemplateCacheFiles{ + TemplateFiles: t, CacheIdentifier: identifier.String(), - }, nil -} - -func (c *TemplateCacheFiles) CacheDir() string { - return filepath.Join(templateCacheDir, c.TemplateId, c.BuildId, "cache", c.CacheIdentifier) -} + } -func (c *TemplateCacheFiles) CacheMemfileFullSnapshotPath() string { - name := 
fmt.Sprintf("%s-%s-%s.full", c.BuildId, MemfileName, c.CacheIdentifier) + err = os.MkdirAll(tcf.cacheDir(), os.ModePerm) + if err != nil { + return nil, fmt.Errorf("failed to create cache dir '%s': %w", tcf.cacheDir(), err) + } - return filepath.Join(snapshotCacheDir, name) + return tcf, nil } func (c *TemplateCacheFiles) CacheSnapfilePath() string { - return filepath.Join(c.CacheDir(), SnapfileName) + return filepath.Join(c.cacheDir(), SnapfileName) +} + +func (c *TemplateCacheFiles) cacheDir() string { + return filepath.Join(templateCacheDir, c.TemplateId, c.BuildId, "cache", c.CacheIdentifier) } diff --git a/packages/shared/pkg/storage/temporary_memfile.go b/packages/shared/pkg/storage/temporary_memfile.go new file mode 100644 index 0000000..98c6579 --- /dev/null +++ b/packages/shared/pkg/storage/temporary_memfile.go @@ -0,0 +1,66 @@ +package storage + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/google/uuid" + "golang.org/x/sync/semaphore" +) + +const ( + // snapshotCacheDir is a tmpfs directory mounted on the host. + // This is used for speed optimization as the final diff is copied to the persistent storage. 
+ snapshotCacheDir = "/mnt/snapshot-cache" + + maxParallelMemfileSnapshotting = 8 +) + +var snapshotCacheQueue = semaphore.NewWeighted(maxParallelMemfileSnapshotting) + +type TemporaryMemfile struct { + path string + closeFn func() +} + +func AcquireTmpMemfile( + ctx context.Context, + buildID string, +) (*TemporaryMemfile, error) { + randomID, err := uuid.NewRandom() + if err != nil { + return nil, fmt.Errorf("failed to generate identifier: %w", err) + } + + err = snapshotCacheQueue.Acquire(ctx, 1) + if err != nil { + return nil, fmt.Errorf("failed to acquire cache: %w", err) + } + releaseOnce := sync.OnceFunc(func() { + snapshotCacheQueue.Release(1) + }) + + return &TemporaryMemfile{ + path: cacheMemfileFullSnapshotPath(buildID, randomID.String()), + closeFn: releaseOnce, + }, nil +} + +func (f *TemporaryMemfile) Path() string { + return f.path +} + +func (f *TemporaryMemfile) Close() error { + defer f.closeFn() + + return os.RemoveAll(f.path) +} + +func cacheMemfileFullSnapshotPath(buildID string, randomID string) string { + name := fmt.Sprintf("%s-%s-%s.full", buildID, MemfileName, randomID) + + return filepath.Join(snapshotCacheDir, name) +} diff --git a/packages/shared/pkg/synchronization/synchronization.go b/packages/shared/pkg/synchronization/synchronization.go new file mode 100644 index 0000000..117ad86 --- /dev/null +++ b/packages/shared/pkg/synchronization/synchronization.go @@ -0,0 +1,150 @@ +package synchronization + +import ( + "context" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" +) + +type Store[SourceItem any, PoolItem any] interface { + SourceList(ctx context.Context) ([]SourceItem, error) + SourceExists(ctx context.Context, s []SourceItem, p PoolItem) bool + + PoolList(ctx context.Context) []PoolItem + PoolExists(ctx context.Context, s SourceItem) bool + PoolInsert(ctx context.Context, s SourceItem) + PoolUpdate(ctx context.Context, s PoolItem) + PoolRemove(ctx context.Context, s PoolItem) +} + +// 
Synchronize is a generic type that provides methods for synchronizing a pool of items with a source. +// It uses a Store interface to interact with the source and pool, allowing for flexible synchronization logic. +type Synchronize[SourceItem any, PoolItem any] struct { + store Store[SourceItem, PoolItem] + + tracer trace.Tracer + tracerSpanPrefix string + logsPrefix string + + cancel chan struct{} // channel for cancellation of synchronization + cancelOnce sync.Once +} + +func NewSynchronize[SourceItem any, PoolItem any](tracer trace.Tracer, spanPrefix string, logsPrefix string, store Store[SourceItem, PoolItem]) *Synchronize[SourceItem, PoolItem] { + s := &Synchronize[SourceItem, PoolItem]{ + tracer: tracer, + tracerSpanPrefix: spanPrefix, + logsPrefix: logsPrefix, + store: store, + cancel: make(chan struct{}), + } + + return s +} + +func (s *Synchronize[SourceItem, PoolItem]) Start(syncInterval time.Duration, syncRoundTimeout time.Duration, runInitialSync bool) { + if runInitialSync { + initialSyncTimeout, initialSyncCancel := context.WithTimeout(context.Background(), syncRoundTimeout) + err := s.sync(initialSyncTimeout) + initialSyncCancel() + if err != nil { + zap.L().Error(s.getLog("Initial sync failed"), zap.Error(err)) + } + } + + timer := time.NewTicker(syncInterval) + defer timer.Stop() + + for { + select { + case <-s.cancel: + zap.L().Info(s.getLog("Background synchronization ended")) + return + case <-timer.C: + syncTimeout, syncCancel := context.WithTimeout(context.Background(), syncRoundTimeout) + err := s.sync(syncTimeout) + syncCancel() + if err != nil { + zap.L().Error(s.getLog("Failed to synchronize"), zap.Error(err)) + } + } + } +} + +func (s *Synchronize[SourceItem, PoolItem]) Close() { + s.cancelOnce.Do( + func() { close(s.cancel) }, + ) +} + +func (s *Synchronize[SourceItem, PoolItem]) sync(ctx context.Context) error { + spanCtx, span := s.tracer.Start(ctx, s.getSpanName("sync-items")) + defer span.End() + + sourceItems, err := 
s.store.SourceList(ctx) + if err != nil { + return err + } + + s.syncDiscovered(spanCtx, sourceItems) + s.syncOutdated(spanCtx, sourceItems) + + return nil +} + +func (s *Synchronize[SourceItem, PoolItem]) syncDiscovered(ctx context.Context, sourceItems []SourceItem) { + spanCtx, span := s.tracer.Start(ctx, s.getSpanName("sync-discovered-items")) + defer span.End() + + var wg sync.WaitGroup + defer wg.Wait() + + for _, item := range sourceItems { + // item already exists in the pool, skip it + if ok := s.store.PoolExists(ctx, item); ok { + continue + } + + // initialize newly discovered item + wg.Add(1) + go func(item SourceItem) { + defer wg.Done() + s.store.PoolInsert(spanCtx, item) + }(item) + } +} + +func (s *Synchronize[SourceItem, PoolItem]) syncOutdated(ctx context.Context, sourceItems []SourceItem) { + spanCtx, span := s.tracer.Start(ctx, s.getSpanName("sync-outdated-items")) + defer span.End() + + var wg sync.WaitGroup + defer wg.Wait() + + for _, poolItem := range s.store.PoolList(ctx) { + found := s.store.SourceExists(ctx, sourceItems, poolItem) + if found { + s.store.PoolUpdate(ctx, poolItem) + continue + } + + // remove the item that is no longer present in the source + wg.Add(1) + go func(poolItem PoolItem) { + defer wg.Done() + s.store.PoolRemove(spanCtx, poolItem) + }(poolItem) + } +} + +func (s *Synchronize[SourceItem, PoolItem]) getSpanName(name string) string { + return fmt.Sprintf("%s-%s", s.tracerSpanPrefix, name) +} + +func (s *Synchronize[SourceItem, PoolItem]) getLog(message string) string { + return fmt.Sprintf("%s: %s", s.logsPrefix, message) +} diff --git a/packages/shared/pkg/synchronization/synchronization_test.go b/packages/shared/pkg/synchronization/synchronization_test.go new file mode 100644 index 0000000..d24cb36 --- /dev/null +++ b/packages/shared/pkg/synchronization/synchronization_test.go @@ -0,0 +1,129 @@ +package synchronization + +import ( + "context" + "sync" + "testing" + + "go.opentelemetry.io/otel/trace/noop" + 
"go.uber.org/zap" +) + +type testStore struct { + mu sync.Mutex + + source []string + pool map[string]string + + inserts int + removes int +} + +func newTestStore(source []string, preExistingPool []string) *testStore { + pool := make(map[string]string, len(preExistingPool)) + for _, k := range preExistingPool { + pool[k] = k + } + + return &testStore{source: source, pool: pool} +} + +func (s *testStore) SourceList(ctx context.Context) ([]string, error) { + return append([]string(nil), s.source...), nil +} + +func (s *testStore) SourceExists(ctx context.Context, source []string, p string) bool { + for _, v := range source { + if v == p { + return true + } + } + + return false +} + +func (s *testStore) PoolList(ctx context.Context) []string { + s.mu.Lock() + defer s.mu.Unlock() + + out := make([]string, 0) + for k := range s.pool { + out = append(out, k) + } + + return out +} + +func (s *testStore) PoolExists(ctx context.Context, item string) bool { + s.mu.Lock() + defer s.mu.Unlock() + _, ok := s.pool[item] + return ok +} + +func (s *testStore) PoolInsert(ctx context.Context, value string) { + s.mu.Lock() + defer s.mu.Unlock() + s.pool[value] = value + s.inserts++ +} + +func (s *testStore) PoolUpdate(ctx context.Context, value string) { /* not used */ } + +func (s *testStore) PoolRemove(ctx context.Context, item string) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.pool { + if v == item { + delete(s.pool, k) + break + } + } + + s.removes++ +} + +func newSynchronizer(store Store[string, string]) *Synchronize[string, string] { + zap.ReplaceGlobals(zap.NewNop()) + return &Synchronize[string, string]{ + store: store, + tracer: noop.NewTracerProvider().Tracer("test"), + tracerSpanPrefix: "test synchronization", + logsPrefix: "test synchronization", + } +} + +func TestSynchronize_InsertAndRemove(t *testing.T) { + ctx := context.Background() + + // Start with empty pool; source has a & b. 
+ s := newTestStore([]string{"a", "b"}, nil) + syncer := newSynchronizer(s) + + if err := syncer.sync(ctx); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if want, got := 2, s.inserts; want != got { + t.Fatalf("insert count mismatch: want %d got %d", want, got) + } + + if len(s.pool) != 2 { + t.Fatalf("pool size want 2 got %d", len(s.pool)) + } + + // Now remove "b" from the source – should trigger exactly one removal. + s.source = []string{"a"} + if err := syncer.sync(ctx); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if want, got := 1, s.removes; want != got { + t.Fatalf("remove count mismatch: want %d got %d", want, got) + } + + if len(s.pool) != 1 || !s.PoolExists(ctx, "a") { + t.Fatalf("pool contents after removal are incorrect: %#v", s.pool) + } +} diff --git a/packages/shared/pkg/telemetry/config.go b/packages/shared/pkg/telemetry/config.go new file mode 100644 index 0000000..26bb24a --- /dev/null +++ b/packages/shared/pkg/telemetry/config.go @@ -0,0 +1,39 @@ +package telemetry + +import ( + "context" + "fmt" + "os" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/semconv/v1.21.0" +) + +var otelCollectorGRPCEndpoint = os.Getenv("OTEL_COLLECTOR_GRPC_ENDPOINT") + +func getResource(ctx context.Context, serviceName, serviceVersion, instanceID string) (*resource.Resource, error) { + attributes := []attribute.KeyValue{ + semconv.ServiceName(serviceName), + semconv.ServiceVersion(serviceVersion), + semconv.ServiceInstanceID(instanceID), + semconv.TelemetrySDKName("otel"), + semconv.TelemetrySDKLanguageGo, + } + + hostname, err := os.Hostname() + if err == nil { + attributes = append(attributes, semconv.HostName(hostname)) + } + + res, err := resource.New( + ctx, + resource.WithSchemaURL(semconv.SchemaURL), + resource.WithAttributes(attributes...), + ) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + + return res, nil +} diff --git 
a/packages/shared/pkg/telemetry/fields.go b/packages/shared/pkg/telemetry/fields.go new file mode 100644 index 0000000..c177ec1 --- /dev/null +++ b/packages/shared/pkg/telemetry/fields.go @@ -0,0 +1,152 @@ +package telemetry + +import ( + "fmt" + "time" + + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +func WithSandboxID(sandboxID string) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithSandboxID(sandboxID)) +} + +func WithTemplateID(templateID string) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithTemplateID(templateID)) +} + +func WithBuildID(buildID string) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithBuildID(buildID)) +} + +func WithClusterID(clusterID uuid.UUID) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithClusterID(clusterID)) +} + +func WithClusterNodeID(nodeID string) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithClusterNodeID(nodeID)) +} + +func WithTeamID(teamID string) attribute.KeyValue { + return zapFieldToOTELAttribute(logger.WithTeamID(teamID)) +} + +func zapFieldToOTELAttribute(f zap.Field) attribute.KeyValue { + e := &ZapFieldToOTELAttributeEncoder{} + f.AddTo(e) + return e.KeyValue +} + +type ZapFieldToOTELAttributeEncoder struct { + attribute.KeyValue +} + +func (z *ZapFieldToOTELAttributeEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { + return nil +} + +func (z *ZapFieldToOTELAttributeEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error { + return nil +} + +func (z *ZapFieldToOTELAttributeEncoder) AddBinary(key string, value []byte) { + z.KeyValue = attribute.String(key, fmt.Sprintf("%x", value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddByteString(key string, value []byte) { + z.KeyValue = attribute.String(key, string(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) 
AddBool(key string, value bool) { + z.KeyValue = attribute.Bool(key, value) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddComplex128(key string, value complex128) { + z.KeyValue = attribute.String(key, fmt.Sprintf("%v", value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddComplex64(key string, value complex64) { + z.KeyValue = attribute.String(key, fmt.Sprintf("%v", value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddDuration(key string, value time.Duration) { + z.KeyValue = attribute.Int64(key, value.Microseconds()) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddFloat64(key string, value float64) { + z.KeyValue = attribute.Float64(key, value) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddFloat32(key string, value float32) { + z.KeyValue = attribute.Float64(key, float64(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddInt(key string, value int) { + z.KeyValue = attribute.Int(key, value) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddInt64(key string, value int64) { + z.KeyValue = attribute.Int64(key, value) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddInt32(key string, value int32) { + z.KeyValue = attribute.Int(key, int(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddInt16(key string, value int16) { + z.KeyValue = attribute.Int(key, int(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddInt8(key string, value int8) { + z.KeyValue = attribute.Int(key, int(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddString(key, value string) { + z.KeyValue = attribute.String(key, value) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddTime(key string, value time.Time) { + z.KeyValue = attribute.String(key, value.String()) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUint(key string, value uint) { + z.KeyValue = attribute.Int64(key, int64(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUint64(key string, value uint64) { + asInt64 := int64(value) + if asInt64 > 0 { + z.KeyValue = attribute.Int64(key, 
asInt64) + } else { + z.KeyValue = attribute.String(key, "") + } +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUint32(key string, value uint32) { + z.KeyValue = attribute.Int64(key, int64(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUint16(key string, value uint16) { + z.KeyValue = attribute.Int(key, int(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUint8(key string, value uint8) { + z.KeyValue = attribute.Int(key, int(value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddUintptr(key string, value uintptr) { + z.KeyValue = attribute.String(key, fmt.Sprintf("%v", value)) +} + +func (z *ZapFieldToOTELAttributeEncoder) AddReflected(key string, value interface{}) error { + z.KeyValue = attribute.String(key, fmt.Sprintf("%v", value)) + return nil +} + +func (z *ZapFieldToOTELAttributeEncoder) OpenNamespace(key string) { +} diff --git a/packages/shared/pkg/telemetry/logs.go b/packages/shared/pkg/telemetry/logs.go new file mode 100644 index 0000000..210f3dd --- /dev/null +++ b/packages/shared/pkg/telemetry/logs.go @@ -0,0 +1,53 @@ +package telemetry + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + "go.opentelemetry.io/otel/log" + sdklog "go.opentelemetry.io/otel/sdk/log" + "google.golang.org/grpc/encoding/gzip" +) + +type noopLogExporter struct{} + +func (noopLogExporter) Export(context.Context, []sdklog.Record) error { return nil } + +func (noopLogExporter) Shutdown(context.Context) error { return nil } + +func (noopLogExporter) ForceFlush(context.Context) error { return nil } + +func NewLogExporter(ctx context.Context, extraOption ...otlploggrpc.Option) (sdklog.Exporter, error) { + opts := []otlploggrpc.Option{ + otlploggrpc.WithInsecure(), + otlploggrpc.WithEndpoint(otelCollectorGRPCEndpoint), + otlploggrpc.WithCompressor(gzip.Name), + } + opts = append(opts, extraOption...) 
+ + logsExporter, err := otlploggrpc.New( + ctx, + opts..., + ) + if err != nil { + return nil, fmt.Errorf("failed to create logs exporter: %w", err) + } + + return logsExporter, nil +} + +func NewLogProvider(ctx context.Context, logsExporter sdklog.Exporter, serviceName, serviceVersion, instanceID string) (log.LoggerProvider, error) { + res, err := getResource(ctx, serviceName, serviceVersion, instanceID) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + + logsProcessor := sdklog.NewBatchProcessor(logsExporter) + logsProvider := sdklog.NewLoggerProvider( + sdklog.WithResource(res), + sdklog.WithProcessor(logsProcessor), + ) + + return logsProvider, nil +} diff --git a/packages/shared/pkg/telemetry/main.go b/packages/shared/pkg/telemetry/main.go new file mode 100644 index 0000000..f43bc2f --- /dev/null +++ b/packages/shared/pkg/telemetry/main.go @@ -0,0 +1,125 @@ +package telemetry + +import ( + "context" + "errors" + "fmt" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/log" + noopLogs "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + noopMetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/propagation" + sdklog "go.opentelemetry.io/otel/sdk/log" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + noopTrace "go.opentelemetry.io/otel/trace/noop" +) + +const metricExportPeriod = 15 * time.Second + +type Client struct { + MetricExporter sdkmetric.Exporter + MeterProvider metric.MeterProvider + SpanExporter sdktrace.SpanExporter + TracerProvider trace.TracerProvider + TracePropagator propagation.TextMapPropagator + LogsExporter sdklog.Exporter + LogsProvider log.LoggerProvider +} + +func New(ctx context.Context, serviceName, commitSHA, clientID string) (*Client, error) { + // Setup metrics + metricsExporter, err := 
NewMeterExporter(ctx, otlpmetricgrpc.WithAggregationSelector(func(kind sdkmetric.InstrumentKind) sdkmetric.Aggregation { + if kind == sdkmetric.InstrumentKindHistogram { + // Defaults from https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation + return sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: 160, + MaxScale: 20, + NoMinMax: false, + } + } + return sdkmetric.DefaultAggregationSelector(kind) + })) + if err != nil { + return nil, fmt.Errorf("failed to create metrics exporter: %w", err) + } + + meterProvider, err := NewMeterProvider(ctx, metricsExporter, metricExportPeriod, serviceName, commitSHA, clientID) + if err != nil { + return nil, fmt.Errorf("failed to create metrics provider: %w", err) + } + + // Setup logging + logsExporter, err := NewLogExporter(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create logs exporter: %w", err) + } + + logsProvider, err := NewLogProvider(ctx, logsExporter, serviceName, commitSHA, clientID) + if err != nil { + return nil, fmt.Errorf("failed to create logs provider: %w", err) + } + + // Setup tracing + spanExporter, err := NewSpanExporter(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create span exporter: %w", err) + } + + tracerProvider, err := NewTracerProvider(ctx, spanExporter, serviceName, commitSHA, clientID) + if err != nil { + return nil, fmt.Errorf("failed to create tracer provider: %w", err) + } + + // There's probably not a reason why not to set the trace propagator globally, it's used in SDKs + propagator := NewTextPropagator() + otel.SetTextMapPropagator(propagator) + + return &Client{ + MetricExporter: metricsExporter, + MeterProvider: meterProvider, + SpanExporter: spanExporter, + TracerProvider: tracerProvider, + TracePropagator: propagator, + LogsExporter: logsExporter, + LogsProvider: logsProvider, + }, nil +} + +func (t *Client) Shutdown(ctx context.Context) error { + var errs 
[]error + if t.MetricExporter != nil { + if err := t.MetricExporter.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + if t.SpanExporter != nil { + if err := t.SpanExporter.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + if t.LogsExporter != nil { + if err := t.LogsExporter.Shutdown(ctx); err != nil { + errs = append(errs, err) + } + } + + return errors.Join(errs...) +} + +func NewNoopClient() *Client { + return &Client{ + MetricExporter: &noopMetricExporter{}, + MeterProvider: noopMetric.MeterProvider{}, + SpanExporter: &noopSpanExporter{}, + TracerProvider: noopTrace.NewTracerProvider(), + TracePropagator: propagation.NewCompositeTextMapPropagator(), + LogsExporter: &noopLogExporter{}, + LogsProvider: noopLogs.NewLoggerProvider(), + } +} diff --git a/packages/shared/pkg/telemetry/meters.go b/packages/shared/pkg/telemetry/meters.go new file mode 100644 index 0000000..4742028 --- /dev/null +++ b/packages/shared/pkg/telemetry/meters.go @@ -0,0 +1,160 @@ +package telemetry + +import "go.opentelemetry.io/otel/metric" + +type ( + CounterType string + GaugeFloatType string + GaugeIntType string + UpDownCounterType string + ObservableUpDownCounterType string +) + +const ( + SandboxCreateMeterName CounterType = "api.env.instance.started" +) + +const ( + SandboxCountMeterName UpDownCounterType = "api.env.instance.running" + NewNetworkSlotSPoolCounterMeterName UpDownCounterType = "orchestrator.network.slots_pool.new" + ReusedNetworkSlotSPoolCounterMeterName UpDownCounterType = "orchestrator.network.slots_pool.reused" + NBDkSlotSReadyPoolCounterMeterName UpDownCounterType = "orchestrator.nbd.slots_pool.read" +) + +const ( + OrchestratorSandboxCountMeterName ObservableUpDownCounterType = "orchestrator.env.sandbox.running" + + ClientProxyServerConnectionsMeterCounterName ObservableUpDownCounterType = "client_proxy.proxy.server.connections.open" + ClientProxyPoolConnectionsMeterCounterName ObservableUpDownCounterType = 
"client_proxy.proxy.pool.connections.open" + ClientProxyPoolSizeMeterCounterName ObservableUpDownCounterType = "client_proxy.proxy.pool.size" + + OrchestratorProxyServerConnectionsMeterCounterName ObservableUpDownCounterType = "orchestrator.proxy.server.connections.open" + OrchestratorProxyPoolConnectionsMeterCounterName ObservableUpDownCounterType = "orchestrator.proxy.pool.connections.open" + OrchestratorProxyPoolSizeMeterCounterName ObservableUpDownCounterType = "orchestrator.proxy.pool.size" + + BuildCounterMeterName ObservableUpDownCounterType = "api.env.build.running" +) + +const ( + SandboxCpuUsedGaugeName GaugeFloatType = "e2b.sandbox.cpu.used" +) + +const ( + ApiOrchestratorCountMeterName GaugeIntType = "api.orchestrator.status" + + SandboxRamUsedGaugeName GaugeIntType = "e2b.sandbox.ram.used" + SandboxRamTotalGaugeName GaugeIntType = "e2b.sandbox.ram.total" + SandboxCpuTotalGaugeName GaugeIntType = "e2b.sandbox.cpu.total" +) + +var counterDesc = map[CounterType]string{ + SandboxCreateMeterName: "Number of currently waiting requests to create a new sandbox", +} + +var counterUnits = map[CounterType]string{ + SandboxCreateMeterName: "{sandbox}", +} + +var upDownCounterDesc = map[UpDownCounterType]string{ + SandboxCountMeterName: "Counter of started instances.", + ReusedNetworkSlotSPoolCounterMeterName: "Number of reused network slots ready to be used.", + NewNetworkSlotSPoolCounterMeterName: "Number of new network slots ready to be used.", + NBDkSlotSReadyPoolCounterMeterName: "Number of nbd slots ready to be used.", +} + +var upDownCounterUnits = map[UpDownCounterType]string{ + SandboxCountMeterName: "{sandbox}", + ReusedNetworkSlotSPoolCounterMeterName: "{slot}", + NewNetworkSlotSPoolCounterMeterName: "{slot}", + NBDkSlotSReadyPoolCounterMeterName: "{slot}", +} + +var observableUpDownCounterDesc = map[ObservableUpDownCounterType]string{ + OrchestratorSandboxCountMeterName: "Counter of running sandboxes on the orchestrator.", + 
ClientProxyServerConnectionsMeterCounterName: "Open connections to the client proxy from load balancer.", + ClientProxyPoolConnectionsMeterCounterName: "Open connections from the client proxy to the orchestrator proxy.", + ClientProxyPoolSizeMeterCounterName: "Size of the client proxy pool.", + OrchestratorProxyServerConnectionsMeterCounterName: "Open connections to the orchestrator proxy from client proxies.", + OrchestratorProxyPoolConnectionsMeterCounterName: "Open connections from the orchestrator proxy to sandboxes.", + OrchestratorProxyPoolSizeMeterCounterName: "Size of the orchestrator proxy pool.", + BuildCounterMeterName: "Counter of running builds.", +} + +var observableUpDownCounterUnits = map[ObservableUpDownCounterType]string{ + OrchestratorSandboxCountMeterName: "{sandbox}", + ClientProxyServerConnectionsMeterCounterName: "{connection}", + ClientProxyPoolConnectionsMeterCounterName: "{connection}", + ClientProxyPoolSizeMeterCounterName: "{transport}", + OrchestratorProxyServerConnectionsMeterCounterName: "{connection}", + OrchestratorProxyPoolConnectionsMeterCounterName: "{connection}", + OrchestratorProxyPoolSizeMeterCounterName: "{transport}", + BuildCounterMeterName: "{build}", +} + +var gaugeFloatDesc = map[GaugeFloatType]string{ + SandboxCpuUsedGaugeName: "Amount of CPU used by the sandbox.", +} + +var gaugeFloatUnits = map[GaugeFloatType]string{ + SandboxCpuUsedGaugeName: "{percent}", +} + +var gaugeIntDesc = map[GaugeIntType]string{ + ApiOrchestratorCountMeterName: "Counter of running orchestrators.", + SandboxRamUsedGaugeName: "Amount of RAM used by the sandbox.", + SandboxRamTotalGaugeName: "Amount of RAM available to the sandbox.", + SandboxCpuTotalGaugeName: "Amount of CPU available to the sandbox.", +} + +var gaugeIntUnits = map[GaugeIntType]string{ + ApiOrchestratorCountMeterName: "{orchestrator}", + SandboxRamUsedGaugeName: "{By}", + SandboxRamTotalGaugeName: "{By}", + SandboxCpuTotalGaugeName: "{count}", +} + +func GetCounter(meter 
metric.Meter, name CounterType) (metric.Int64Counter, error) { + desc := counterDesc[name] + unit := counterUnits[name] + return meter.Int64Counter(string(name), + metric.WithDescription(desc), + metric.WithUnit(unit), + ) +} + +func GetUpDownCounter(meter metric.Meter, name UpDownCounterType) (metric.Int64UpDownCounter, error) { + desc := upDownCounterDesc[name] + unit := upDownCounterUnits[name] + return meter.Int64UpDownCounter(string(name), + metric.WithDescription(desc), + metric.WithUnit(unit), + ) +} + +func GetObservableUpDownCounter(meter metric.Meter, name ObservableUpDownCounterType, callback metric.Int64Callback) (metric.Int64ObservableUpDownCounter, error) { + desc := observableUpDownCounterDesc[name] + unit := observableUpDownCounterUnits[name] + return meter.Int64ObservableUpDownCounter(string(name), + metric.WithDescription(desc), + metric.WithUnit(unit), + metric.WithInt64Callback(callback), + ) +} + +func GetGaugeFloat(meter metric.Meter, name GaugeFloatType) (metric.Float64ObservableGauge, error) { + desc := gaugeFloatDesc[name] + unit := gaugeFloatUnits[name] + return meter.Float64ObservableGauge(string(name), + metric.WithDescription(desc), + metric.WithUnit(unit), + ) +} + +func GetGaugeInt(meter metric.Meter, name GaugeIntType) (metric.Int64ObservableGauge, error) { + desc := gaugeIntDesc[name] + unit := gaugeIntUnits[name] + return meter.Int64ObservableGauge(string(name), + metric.WithDescription(desc), + metric.WithUnit(unit), + ) +} diff --git a/packages/shared/pkg/telemetry/metrics.go b/packages/shared/pkg/telemetry/metrics.go new file mode 100644 index 0000000..a2f5180 --- /dev/null +++ b/packages/shared/pkg/telemetry/metrics.go @@ -0,0 +1,72 @@ +package telemetry + +import ( + "context" + "fmt" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type 
noopMetricExporter struct{} + +func (noopMetricExporter) Temporality(sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func (noopMetricExporter) Aggregation(sdkmetric.InstrumentKind) sdkmetric.Aggregation { + return sdkmetric.AggregationDrop{} +} + +func (noopMetricExporter) Export(context.Context, *metricdata.ResourceMetrics) error { + return nil +} + +func (noopMetricExporter) ForceFlush(context.Context) error { + return nil +} + +func (noopMetricExporter) Shutdown(context.Context) error { + return nil +} + +func NewMeterExporter(ctx context.Context, extraOption ...otlpmetricgrpc.Option) (sdkmetric.Exporter, error) { + opts := []otlpmetricgrpc.Option{ + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithEndpoint(otelCollectorGRPCEndpoint), + } + opts = append(opts, extraOption...) + + metricExporter, metricErr := otlpmetricgrpc.New( + ctx, + opts..., + ) + if metricErr != nil { + return nil, fmt.Errorf("failed to create metric exporter: %w", metricErr) + } + + return metricExporter, nil +} + +func NewMeterProvider(ctx context.Context, metricsExporter sdkmetric.Exporter, metricExportPeriod time.Duration, serviceName, commitSHA, clientID string, extraOption ...sdkmetric.Option) (metric.MeterProvider, error) { + res, err := getResource(ctx, serviceName, commitSHA, clientID) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + + opts := []sdkmetric.Option{ + sdkmetric.WithResource(res), + sdkmetric.WithReader( + sdkmetric.NewPeriodicReader( + metricsExporter, + sdkmetric.WithInterval(metricExportPeriod), + ), + ), + } + opts = append(opts, extraOption...) 
+ + return sdkmetric.NewMeterProvider(opts...), nil +} diff --git a/packages/shared/pkg/telemetry/otel.go b/packages/shared/pkg/telemetry/otel.go deleted file mode 100644 index 5d654d5..0000000 --- a/packages/shared/pkg/telemetry/otel.go +++ /dev/null @@ -1,144 +0,0 @@ -package telemetry - -import ( - "context" - "fmt" - "log" - "os" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.21.0" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/encoding/gzip" -) - -const ( - metricExportPeriod = 15 * time.Second -) - -var otelCollectorGRPCEndpoint = os.Getenv("OTEL_COLLECTOR_GRPC_ENDPOINT") - -type client struct { - tracerProvider *sdktrace.TracerProvider - meterProvider *metric.MeterProvider -} - -// InitOTLPExporter initializes an OTLP exporter, and configures the corresponding trace providers. -func InitOTLPExporter(ctx context.Context, serviceName, serviceVersion string) func(ctx context.Context) error { - attributes := []attribute.KeyValue{ - semconv.ServiceName(serviceName), - semconv.ServiceVersion(serviceVersion), - semconv.TelemetrySDKName("otel"), - semconv.TelemetrySDKLanguageGo, - } - - hostname, err := os.Hostname() - if err == nil { - attributes = append(attributes, semconv.HostName(hostname)) - } - - res, err := resource.New( - ctx, - resource.WithSchemaURL(semconv.SchemaURL), - resource.WithAttributes(attributes...), - ) - if err != nil { - panic(fmt.Errorf("failed to create resource: %w", err)) - } - - var otelClient client - - go func() { - // Set up a connection to the collector. 
- var conn *grpc.ClientConn - - retryInterval := 5 * time.Second - - for { - dialCtx, cancel := context.WithTimeout(ctx, time.Second) - - conn, err = grpc.DialContext(dialCtx, - otelCollectorGRPCEndpoint, - // Note the use of insecure transport here. TLS is recommended in production. - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - ) - - cancel() - - if err != nil { - log.Printf("Failed to connect to otel collector: %v", err) - time.Sleep(retryInterval) - } else { - break - } - } - - // Set up a trace exporter - traceExporter, traceErr := otlptracegrpc.New( - ctx, - otlptracegrpc.WithGRPCConn(conn), - otlptracegrpc.WithCompressor(gzip.Name), - ) - if traceErr != nil { - panic(fmt.Errorf("failed to create trace exporter: %w", err)) - } - - // Register the trace exporter with a TracerProvider, using a batch - // span processor to aggregate spans before export. - bsp := sdktrace.NewBatchSpanProcessor(traceExporter) - tracerProvider := sdktrace.NewTracerProvider( - sdktrace.WithSampler(sdktrace.AlwaysSample()), - sdktrace.WithResource(res), - sdktrace.WithSpanProcessor(bsp), - ) - - otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) - otel.SetTracerProvider(tracerProvider) - - metricExporter, metricErr := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithGRPCConn(conn)) - if metricErr != nil { - panic(fmt.Errorf("failed to create metric exporter: %w", err)) - } - - meterProvider := metric.NewMeterProvider( - metric.WithResource(res), - metric.WithReader( - metric.NewPeriodicReader( - metricExporter, - metric.WithInterval(metricExportPeriod), - ), - ), - ) - - otel.SetMeterProvider(meterProvider) - }() - - // Shutdown will flush any remaining spans and shut down the exporter. 
- return otelClient.close -} - -func (c *client) close(ctx context.Context) error { - if c.tracerProvider != nil { - if err := c.tracerProvider.Shutdown(ctx); err != nil { - return err - } - } - - if c.meterProvider != nil { - if err := c.meterProvider.Shutdown(ctx); err != nil { - return err - } - } - - return nil -} diff --git a/packages/shared/pkg/telemetry/traces.go b/packages/shared/pkg/telemetry/traces.go new file mode 100644 index 0000000..7ee5b56 --- /dev/null +++ b/packages/shared/pkg/telemetry/traces.go @@ -0,0 +1,62 @@ +package telemetry + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "google.golang.org/grpc/encoding/gzip" +) + +type noopSpanExporter struct{} + +// ExportSpans handles export of spans by dropping them. +func (nsb *noopSpanExporter) ExportSpans(context.Context, []sdktrace.ReadOnlySpan) error { return nil } + +// Shutdown stops the exporter by doing nothing. +func (nsb *noopSpanExporter) Shutdown(context.Context) error { return nil } + +func NewSpanExporter(ctx context.Context, extraOption ...otlptracegrpc.Option) (sdktrace.SpanExporter, error) { + opts := []otlptracegrpc.Option{ + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(otelCollectorGRPCEndpoint), + otlptracegrpc.WithCompressor(gzip.Name), + } + opts = append(opts, extraOption...) 
+ + // Set up a trace exporter + traceExporter, traceErr := otlptracegrpc.New( + ctx, + opts..., + ) + if traceErr != nil { + return nil, fmt.Errorf("failed to create trace exporter: %w", traceErr) + } + + return traceExporter, nil +} + +func NewTracerProvider(ctx context.Context, spanExporter sdktrace.SpanExporter, serviceName, serviceVersion string, instanceID string) (trace.TracerProvider, error) { + res, err := getResource(ctx, serviceName, serviceVersion, instanceID) + if err != nil { + return nil, fmt.Errorf("failed to create resource: %w", err) + } + + // Register the trace exporter with a TracerProvider, using a batch + // span processor to aggregate spans before export. + bsp := sdktrace.NewBatchSpanProcessor(spanExporter) + tracerProvider := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.AlwaysSample()), + sdktrace.WithResource(res), + sdktrace.WithSpanProcessor(bsp), + ) + + return tracerProvider, nil +} + +func NewTextPropagator() propagation.TextMapPropagator { + return propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}) +} diff --git a/packages/shared/pkg/telemetry/tracing.go b/packages/shared/pkg/telemetry/tracing.go index df782ee..04c609a 100644 --- a/packages/shared/pkg/telemetry/tracing.go +++ b/packages/shared/pkg/telemetry/tracing.go @@ -8,6 +8,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" ) var OTELTracingPrint = os.Getenv("OTEL_TRACING_PRINT") != "false" @@ -72,49 +73,31 @@ func ReportEvent(ctx context.Context, name string, attrs ...attribute.KeyValue) ) } -func ReportCriticalError(ctx context.Context, err error, attrs ...attribute.KeyValue) { +func ReportCriticalError(ctx context.Context, message string, err error, attrs ...attribute.KeyValue) { span := trace.SpanFromContext(ctx) - if OTELTracingPrint { - var msg string + debugID := getDebugID(ctx) + 
zap.L().With(attributesToZapFields(attrs...)...).Error(message, zap.Stringp("debug_id", debugID), zap.Error(err)) - if len(attrs) == 0 { - msg = fmt.Sprintf("Critical error: %v\n", err) - } else { - msg = fmt.Sprintf("Critical error: %v - %#v\n", err, attrs) - } + errorAttrs := append(attrs, attribute.String("error.message", message)) - debugID := getDebugID(ctx) - fmt.Fprint(os.Stderr, debugFormat(debugID, msg)) - } - - span.RecordError(err, + span.RecordError(fmt.Errorf("%s: %w", message, err), trace.WithStackTrace(true), trace.WithAttributes( - attrs..., + errorAttrs..., ), ) - span.SetStatus(codes.Error, "critical error") + span.SetStatus(codes.Error, message) } -func ReportError(ctx context.Context, err error, attrs ...attribute.KeyValue) { +func ReportError(ctx context.Context, message string, err error, attrs ...attribute.KeyValue) { span := trace.SpanFromContext(ctx) - if OTELTracingPrint { - var msg string - - if len(attrs) == 0 { - msg = fmt.Sprintf("Error: %v\n", err) - } else { - msg = fmt.Sprintf("Error: %v - %#v\n", err, attrs) - } + debugID := getDebugID(ctx) + zap.L().With(attributesToZapFields(attrs...)...).Warn(message, zap.Stringp("debug_id", debugID), zap.Error(err)) - debugID := getDebugID(ctx) - fmt.Fprint(os.Stderr, debugFormat(debugID, msg)) - } - - span.RecordError(err, + span.RecordError(fmt.Errorf("%s: %w", message, err), trace.WithStackTrace(true), trace.WithAttributes( attrs..., @@ -127,6 +110,7 @@ func GetContextFromRemote(ctx context.Context, tracer trace.Tracer, name, spanID if traceIDErr != nil { ReportError( ctx, + traceIDErr.Error(), traceIDErr, attribute.String("trace.id", traceID), attribute.Int("trace.id.length", len(traceID)), @@ -137,6 +121,7 @@ func GetContextFromRemote(ctx context.Context, tracer trace.Tracer, name, spanID if spanIDErr != nil { ReportError( ctx, + spanIDErr.Error(), spanIDErr, attribute.String("span.id", spanID), attribute.Int("span.id.length", len(spanID)), @@ -157,3 +142,31 @@ func 
GetContextFromRemote(ctx context.Context, tracer trace.Tracer, name, spanID ), ) } + +func attributesToZapFields(attrs ...attribute.KeyValue) []zap.Field { + fields := make([]zap.Field, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + switch attr.Value.Type() { + case attribute.STRING: + fields = append(fields, zap.String(key, attr.Value.AsString())) + case attribute.INT64: + fields = append(fields, zap.Int64(key, attr.Value.AsInt64())) + case attribute.FLOAT64: + fields = append(fields, zap.Float64(key, attr.Value.AsFloat64())) + case attribute.BOOL: + fields = append(fields, zap.Bool(key, attr.Value.AsBool())) + case attribute.BOOLSLICE: + fields = append(fields, zap.Bools(key, attr.Value.AsBoolSlice())) + case attribute.INT64SLICE: + fields = append(fields, zap.Int64s(key, attr.Value.AsInt64Slice())) + case attribute.FLOAT64SLICE: + fields = append(fields, zap.Float64s(key, attr.Value.AsFloat64Slice())) + case attribute.STRINGSLICE: + fields = append(fields, zap.Strings(key, attr.Value.AsStringSlice())) + default: + fields = append(fields, zap.Any(key, attr.Value.AsInterface())) + } + } + return fields +} diff --git a/packages/shared/pkg/utils/context.go b/packages/shared/pkg/utils/context.go deleted file mode 100644 index cd4e7bf..0000000 --- a/packages/shared/pkg/utils/context.go +++ /dev/null @@ -1,47 +0,0 @@ -package utils - -import ( - "context" - "sync" - "time" -) - -type LockableCancelableContext struct { - ctx context.Context - mu sync.Mutex - cancel context.CancelFunc -} - -func NewLockableCancelableContext(ctx context.Context) *LockableCancelableContext { - lcc := &LockableCancelableContext{} - lcc.ctx, lcc.cancel = context.WithCancel(ctx) - return lcc -} - -func (lcc *LockableCancelableContext) Lock() { - lcc.mu.Lock() -} - -func (lcc *LockableCancelableContext) Unlock() { - lcc.mu.Unlock() -} - -func (lcc *LockableCancelableContext) Done() <-chan struct{} { - return lcc.ctx.Done() -} - -func (lcc *LockableCancelableContext) 
Err() error { - return lcc.ctx.Err() -} - -func (lcc *LockableCancelableContext) Value(key interface{}) interface{} { - return lcc.ctx.Value(key) -} - -func (lcc *LockableCancelableContext) Cancel() { - lcc.cancel() -} - -func (lcc *LockableCancelableContext) Deadline() (deadline time.Time, ok bool) { - return lcc.ctx.Deadline() -} diff --git a/packages/shared/pkg/utils/filter.go b/packages/shared/pkg/utils/filter.go new file mode 100644 index 0000000..a9408d9 --- /dev/null +++ b/packages/shared/pkg/utils/filter.go @@ -0,0 +1,14 @@ +package utils + +// Filter takes a slice of any type T and a predicate function f. +// It returns a new slice containing only the elements from the input slice +// for which the predicate function returns true. +func Filter[T any](input []T, f func(T) bool) []T { + var output []T + for _, v := range input { + if f(v) { + output = append(output, v) + } + } + return output +} diff --git a/packages/shared/pkg/utils/map.go b/packages/shared/pkg/utils/map.go new file mode 100644 index 0000000..fb08a93 --- /dev/null +++ b/packages/shared/pkg/utils/map.go @@ -0,0 +1,11 @@ +package utils + +// Map goes through each item in the input slice and applies the function f to it. +// It returns a new slice with the results. +func Map[T any, U any](input []T, f func(T) U) []U { + output := make([]U, len(input)) + for i, v := range input { + output[i] = f(v) + } + return output +} diff --git a/packages/shared/pkg/utils/map_values.go b/packages/shared/pkg/utils/map_values.go new file mode 100644 index 0000000..7d5b6a6 --- /dev/null +++ b/packages/shared/pkg/utils/map_values.go @@ -0,0 +1,10 @@ +package utils + +// MapValues takes a map and returns a slice of all its values. 
+func MapValues[K comparable, V any](m map[K]V) []V { + values := make([]V, 0, len(m)) + for _, v := range m { + values = append(values, v) + } + return values +} diff --git a/packages/shared/pkg/utils/set_once.go b/packages/shared/pkg/utils/set_once.go index 1aefdad..2fa4cd7 100644 --- a/packages/shared/pkg/utils/set_once.go +++ b/packages/shared/pkg/utils/set_once.go @@ -15,7 +15,7 @@ type SetOnce[T any] struct { setDone func() done chan struct{} res *result[T] - mu sync.RWMutex + mu sync.RWMutex } func NewSetOnce[T any]() *SetOnce[T] { diff --git a/packages/shared/pkg/utils/symlink.go b/packages/shared/pkg/utils/symlink.go new file mode 100644 index 0000000..b06cf3c --- /dev/null +++ b/packages/shared/pkg/utils/symlink.go @@ -0,0 +1,11 @@ +package utils + +import ( + "os" +) + +func SymlinkForce(oldname, newname string) error { + // Ignore error if the symlink does not exist + _ = os.Remove(newname) + return os.Symlink(oldname, newname) +} diff --git a/packages/shared/pkg/utils/version.go b/packages/shared/pkg/utils/version.go new file mode 100644 index 0000000..88cf379 --- /dev/null +++ b/packages/shared/pkg/utils/version.go @@ -0,0 +1,15 @@ +package utils + +import "golang.org/x/mod/semver" + +func IsGTEVersion(curVersion, minVersion string) bool { + if len(curVersion) > 0 && curVersion[0] != 'v' { + curVersion = "v" + curVersion + } + + if !semver.IsValid(curVersion) { + return false + } + + return semver.Compare(curVersion, minVersion) >= 0 +} diff --git a/packages/shared/scripts/aws-seed.sh b/packages/shared/scripts/aws-seed.sh deleted file mode 100755 index f265e9f..0000000 --- a/packages/shared/scripts/aws-seed.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash -set -e - -echo "AWS Database Seed Script" -echo "========================" - -# Ensure all required environment variables are set -DB_HOST=${DB_HOST:-"xxxxxx"} -DB_PORT=${DB_PORT:-"5432"} -DB_USER=${DB_USER:-"xxxx"} -DB_PASSWORD=${DB_PASSWORD:-"xxxxxx"} -DB_NAME=${DB_NAME:-"postgres"} - -echo "Using 
database: $DB_HOST" - -# Create the team -TEAM_SQL=" -INSERT INTO teams (id, name, email, tier) -VALUES ('00000000-0000-0000-0000-000000000000', 'E2B', 'admin@example.com', 'base_v1') -ON CONFLICT (id) DO UPDATE SET name = 'E2B', email = 'admin@example.com' -RETURNING id;" - -# Create a user -USER_ID=$(uuidgen) -USER_SQL=" -INSERT INTO users (id, email) -VALUES ('$USER_ID', 'admin@example.com') -ON CONFLICT (email) DO UPDATE SET email = 'admin@example.com' -RETURNING id;" - -# Associate user with team -USER_TEAM_SQL=" -INSERT INTO users_teams (user_id, team_id, is_default) -VALUES ('$USER_ID', '00000000-0000-0000-0000-000000000000', true) -ON CONFLICT (user_id, team_id) DO UPDATE SET is_default = true;" - -# Create access token -TOKEN_SQL=" -INSERT INTO access_tokens (id, user_id) -VALUES ('e2b_access_token', '$USER_ID') -ON CONFLICT (id) DO UPDATE SET user_id = '$USER_ID';" - -# Create team API key -TEAM_API_KEY_SQL=" -INSERT INTO team_api_keys (api_key, team_id) -VALUES ('e2b_team_api_key', '00000000-0000-0000-0000-000000000000') -ON CONFLICT (api_key) DO UPDATE SET team_id = '00000000-0000-0000-0000-000000000000';" - -# Create environment (template) -ENV_SQL=" -INSERT INTO envs (id, team_id, public) -VALUES ('rki5dems9wqfm4r03t7g', '00000000-0000-0000-0000-000000000000', true) -ON CONFLICT (id) DO UPDATE SET team_id = '00000000-0000-0000-0000-000000000000', public = true;" - -# Combine all SQL statements -SQL="BEGIN; -$TEAM_SQL -$USER_SQL -$USER_TEAM_SQL -$TOKEN_SQL -$TEAM_API_KEY_SQL -$ENV_SQL -COMMIT;" - -# Execute the SQL against the database -export PGPASSWORD="$DB_PASSWORD" -echo "$SQL" | psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 - -echo "Database seeded successfully for AWS environment" \ No newline at end of file diff --git a/packages/shared/scripts/e2b.toml b/packages/shared/scripts/e2b.toml index 0eca009..88b6a10 100644 --- a/packages/shared/scripts/e2b.toml +++ b/packages/shared/scripts/e2b.toml @@ -14,6 +14,3 @@ 
memory_mb = 512 dockerfile = "e2b.Dockerfile" template_name = "base" template_id = "rki5dems9wqfm4r03t7g" -cloud = "aws" -region = "us-east-1" -provider = "aws" diff --git a/packages/shared/scripts/migrate.go b/packages/shared/scripts/migrate.go deleted file mode 100644 index 92724fa..0000000 --- a/packages/shared/scripts/migrate.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "os" - - _ "github.com/lib/pq" // PostgreSQL driver -) - -func main() { - connectionString := os.Getenv("POSTGRES_CONNECTION_STRING") - if connectionString == "" { - log.Fatalf("POSTGRES_CONNECTION_STRING is not set") - } - - db, err := sql.Open("postgres", connectionString) - if err != nil { - log.Fatalf("Failed to connect to database: %v", err) - } - defer db.Close() - - migration, err := os.ReadFile("migration.sql") - if err != nil { - log.Fatalf("Failed to read migration file: %v", err) - } - - // Execute the migration - _, err = db.Exec(string(migration)) - if err != nil { - log.Fatalf("Failed to execute migration: %v", err) - } - - fmt.Println("Migration completed successfully.") -} diff --git a/packages/shared/scripts/seed-db.go b/packages/shared/scripts/seed-db.go deleted file mode 100644 index d6a53b9..0000000 --- a/packages/shared/scripts/seed-db.go +++ /dev/null @@ -1,174 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - - "github.com/google/uuid" - - "github.com/e2b-dev/infra/packages/shared/pkg/db" - "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" - "github.com/e2b-dev/infra/packages/shared/pkg/models/team" -) - -// setupSystemRecords initializes the required database records for system operation -func setupSystemRecords(ctx context.Context, dbConn *db.DB, userEmail, orgID, authToken, apiKey string) { - orgUUID := uuid.MustParse(orgID) - - // Initialize user record - systemUser, err := dbConn.Client.User.Create().SetEmail(userEmail).SetID(uuid.New()).Save(ctx) - if 
err != nil { - panic(err) - } - - // Clean up existing organization if present - _, err = dbConn.Client.Team.Delete().Where(team.Email(userEmail)).Exec(ctx) - if err != nil { - fmt.Println("Note: Unable to remove existing organization:", err) - } - - // Remove previous authentication tokens - _, err = dbConn.Client.AccessToken.Delete().Where(accesstoken.UserID(systemUser.ID)).Exec(ctx) - if err != nil { - fmt.Println("Note: Unable to remove authentication token:", err) - } - - // Create organization record - organization, err := dbConn.Client.Team.Create().SetEmail(userEmail).SetName("E2B").SetID(orgUUID).SetTier("base_v1").Save(ctx) - if err != nil { - panic(err) - } - - // Link user to organization - _, err = dbConn.Client.UsersTeams.Create().SetUserID(systemUser.ID).SetTeamID(organization.ID).SetIsDefault(true).Save(ctx) - if err != nil { - panic(err) - } - - // Generate authentication token - _, err = dbConn.Client.AccessToken.Create().SetUser(systemUser).SetID(authToken).Save(ctx) - if err != nil { - panic(err) - } - - // Create API access key - _, err = dbConn.Client.TeamAPIKey.Create().SetTeam(organization).SetAPIKey(apiKey).Save(ctx) - if err != nil { - panic(err) - } - - // Initialize environment template - _, err = dbConn.Client.Env.Create().SetTeam(organization).SetID("rki5dems9wqfm4r03t7g").SetPublic(true).Save(ctx) - if err != nil { - panic(err) - } -} - -func main() { - ctx := context.Background() - - // Establish database connection - dbConn, err := db.NewClient() - if err != nil { - panic(err) - } - defer dbConn.Close() - - // Verify database state - recordCount, err := dbConn.Client.Team.Query().Count(ctx) - if err != nil { - panic(err) - } - - if recordCount > 1 { - panic("Database already contains existing data") - } - - // Locate user configuration directory - userHomeDir, err := os.UserHomeDir() - if err != nil { - fmt.Println("Error accessing home directory:", err) - return - } - - // Attempt to load configuration - configFilePath := 
filepath.Join(userHomeDir, ".e2b", "config.json") - configData, err := os.ReadFile(configFilePath) - if err != nil { - fmt.Println("Note: Configuration file not found:", err) - fmt.Println("Creating default AWS configuration...") - - // Default configuration values - userEmail := "admin@example.com" - orgID := "00000000-0000-0000-0000-000000000000" - authToken := "e2b_access_token" - apiKey := "e2b_team_api_key" - - // Initialize database with default values - setupSystemRecords(ctx, dbConn, userEmail, orgID, authToken, apiKey) - - // Generate default configuration file - defaultConfig := map[string]interface{}{ - "email": userEmail, - "teamId": orgID, - "accessToken": authToken, - "teamApiKey": apiKey, - "cloud": "aws", - "region": "us-east-1", - } - - formattedConfig, err := json.MarshalIndent(defaultConfig, "", " ") - if err != nil { - panic(err) - } - - // Ensure configuration directory exists - os.MkdirAll(filepath.Join(userHomeDir, ".e2b"), 0755) - - if err := os.WriteFile(configFilePath, formattedConfig, 0644); err != nil { - panic(err) - } - - fmt.Println("Default AWS configuration created at:", configFilePath) - return - } - - // Parse configuration data - configMap := map[string]interface{}{} - err = json.Unmarshal(configData, &configMap) - if err != nil { - panic(err) - } - - // Ensure AWS configuration is present - if _, exists := configMap["cloud"]; !exists { - configMap["cloud"] = "aws" - configMap["region"] = "us-east-1" - - // Update configuration file - updatedConfig, err := json.MarshalIndent(configMap, "", " ") - if err != nil { - panic(err) - } - - if err := os.WriteFile(configFilePath, updatedConfig, 0644); err != nil { - panic(err) - } - - fmt.Println("Configuration updated with AWS settings") - } - - // Extract configuration values - userEmail := configMap["email"].(string) - orgID := configMap["teamId"].(string) - authToken := configMap["accessToken"].(string) - apiKey := configMap["teamApiKey"].(string) - - // Initialize database with 
configuration values - setupSystemRecords(ctx, dbConn, userEmail, orgID, authToken, apiKey) - - fmt.Printf("Database initialization complete.\n") -} diff --git a/packages/shared/scripts/seed/postgres/seed-db.go b/packages/shared/scripts/seed/postgres/seed-db.go new file mode 100644 index 0000000..6bd87e3 --- /dev/null +++ b/packages/shared/scripts/seed/postgres/seed-db.go @@ -0,0 +1,151 @@ +package main + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/google/uuid" + + "github.com/e2b-dev/infra/packages/shared/pkg/db" + "github.com/e2b-dev/infra/packages/shared/pkg/keys" + "github.com/e2b-dev/infra/packages/shared/pkg/models/accesstoken" + "github.com/e2b-dev/infra/packages/shared/pkg/models/team" +) + +func main() { + ctx := context.Background() + hasher := keys.NewSHA256Hashing() + + database, err := db.NewClient(1, 1) + if err != nil { + panic(err) + } + defer database.Close() + + count, err := database.Client.Team.Query().Count(ctx) + if err != nil { + panic(err) + } + + if count > 1 { + panic("Database contains some non-trivial data.") + } + + homeDir, err := os.UserHomeDir() + if err != nil { + fmt.Println("Error getting home directory:", err) + return + } + + configPath := filepath.Join(homeDir, ".e2b", "config.json") + data, err := os.ReadFile(configPath) + if err != nil { + panic(err) + } + + config := map[string]interface{}{} + err = json.Unmarshal(data, &config) + if err != nil { + panic(err) + } + + email := config["email"].(string) + teamID := config["teamId"].(string) + accessToken := config["accessToken"].(string) + teamAPIKey := config["teamApiKey"].(string) + teamUUID := uuid.MustParse(teamID) + + // Open .e2b/config.json + user, err := database.Client.User.Create().SetEmail(email).SetID(uuid.New()).Save(ctx) + if err != nil { + panic(err) + } + + // Delete team + _, err = database.Client.Team.Delete().Where(team.Email(email)).Exec(ctx) + if err != nil { + panic(err) + } + + // 
Remove old access token + _, err = database.Client.AccessToken.Delete().Where(accesstoken.UserID(user.ID)).Exec(ctx) + if err != nil { + panic(err) + } + + // Create team + t, err := database.Client.Team.Create().SetEmail(email).SetName("E2B").SetID(teamUUID).SetTier("base_v1").Save(ctx) + if err != nil { + panic(err) + } + + // Create user team + _, err = database.Client.UsersTeams.Create().SetUserID(user.ID).SetTeamID(t.ID).SetIsDefault(true).Save(ctx) + if err != nil { + panic(err) + } + + // Create access token + tokenWithoutPrefix := strings.TrimPrefix(accessToken, keys.AccessTokenPrefix) + accessTokenBytes, err := hex.DecodeString(tokenWithoutPrefix) + if err != nil { + panic(err) + } + accessTokenHash := hasher.Hash(accessTokenBytes) + accessTokenMask, err := keys.MaskKey(keys.AccessTokenPrefix, tokenWithoutPrefix) + if err != nil { + panic(err) + } + _, err = database.Client.AccessToken.Create(). + SetUser(user). + SetAccessToken(accessToken). + SetAccessTokenHash(accessTokenHash). + SetAccessTokenPrefix(accessTokenMask.Prefix). + SetAccessTokenLength(accessTokenMask.ValueLength). + SetAccessTokenMaskPrefix(accessTokenMask.MaskedValuePrefix). + SetAccessTokenMaskSuffix(accessTokenMask.MaskedValueSuffix). + SetName("Seed Access Token"). + Save(ctx) + if err != nil { + panic(err) + } + + // Create team api key + keyWithoutPrefix := strings.TrimPrefix(teamAPIKey, keys.ApiKeyPrefix) + apiKeyBytes, err := hex.DecodeString(keyWithoutPrefix) + if err != nil { + panic(err) + } + apiKeyHash := hasher.Hash(apiKeyBytes) + apiKeyMask, err := keys.MaskKey(keys.ApiKeyPrefix, keyWithoutPrefix) + if err != nil { + panic(err) + } + _, err = database.Client.TeamAPIKey.Create(). + SetTeam(t). + SetAPIKey(teamAPIKey). + SetAPIKeyHash(apiKeyHash). + SetAPIKeyPrefix(apiKeyMask.Prefix). + SetAPIKeyLength(apiKeyMask.ValueLength). + SetAPIKeyMaskPrefix(apiKeyMask.MaskedValuePrefix). + SetAPIKeyMaskSuffix(apiKeyMask.MaskedValueSuffix). + SetName("Seed API Key"). 
+ Save(ctx) + if err != nil { + panic(err) + } + + // Create template + _, err = database.Client.Env.Create().SetTeam(t).SetID("rki5dems9wqfm4r03t7g").SetPublic(true).Save(ctx) + if err != nil { + panic(err) + } + // Run from make file and build base env + + fmt.Printf("Database seeded.\n") +} diff --git a/packages/template-manager/.gitignore b/packages/template-manager/.gitignore deleted file mode 100644 index 42e6d19..0000000 --- a/packages/template-manager/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.out -.plan -alloc -plugin -bin diff --git a/packages/template-manager/Makefile b/packages/template-manager/Makefile deleted file mode 100644 index b3d9465..0000000 --- a/packages/template-manager/Makefile +++ /dev/null @@ -1,82 +0,0 @@ -#client := gcloud compute instances list --format='csv(name)' --project $(GCP_PROJECT_ID) | grep "client" - -.PHONY: init -init: - brew install protobuf - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 - go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 - -.PHONY: generate -generate: - # You need to install protobuf (brew install protobuf) and following go packages: protoc-gen-go, protoc-gen-go-grpc - # https://grpc.io/docs/languages/go/quickstart/ - @echo "Generating..." - @protoc --go_out=../shared/pkg/grpc/template-manager/ --go_opt=paths=source_relative --go-grpc_out=../shared/pkg/grpc/template-manager/ --go-grpc_opt=paths=source_relative template-manager.proto - @echo "Done" - -.PHONY: build -build: - # Allow for passing commit sha directly for docker builds - $(eval COMMIT_SHA ?= $(shell git rev-parse --short HEAD)) - go mod tidy - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/template-manager -ldflags "-X=main.commitSHA=$(COMMIT_SHA)" . - -.PHONY: build-debug -build-debug: - CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -race -gcflags=all="-N -l" -o bin/template-manager . - -.PHONY: upload-gcp -upload-gcp: - echo "GCP upload has been deprecated. 
Please use upload-aws or upload target instead." - -.PHONY: upload-aws -upload-aws: - # Create repository if it doesn't exist - $(eval IMAGE := e2b-orchestration/template-manager) - aws ecr describe-repositories --repository-names $(IMAGE) --region $(AWS_REGION) || \ - aws ecr create-repository --repository-name $(IMAGE) --region $(AWS_REGION) - # Login to ECR - aws ecr get-login-password --region $(AWS_REGION) | sudo docker login --username AWS --password-stdin $(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com - # Build and push Docker image - $(eval COMMIT_SHA := $(shell git rev-parse --short HEAD)) - @cp -r ../shared .shared/ - sudo docker buildx install || true # sets up the buildx as default docker builder - sudo docker build --platform linux/amd64 --tag "$(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(IMAGE):latest" --build-arg COMMIT_SHA="$(COMMIT_SHA)" . - sudo docker push "$(AWS_ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(IMAGE):latest" - @rm -rf .shared/ - -.PHONY: upload -upload: - @if [ -n "$(AWS_ACCOUNT_ID)" ] && [ -n "$(AWS_REGION)" ]; then \ - $(MAKE) upload-aws; \ - else \ - echo "AWS_ACCOUNT_ID and AWS_REGION must be set"; \ - exit 1; \ - fi - -.PHONY: build-and-upload -build-and-upload: - @rm -rf .shared/ - @cp -r ../shared .shared/ - $(MAKE) build - @rm -rf .shared/ - $(eval E2B_EXECUTE_FILE_BUCKET := $(shell grep CFNSOFTWAREBUCKET /opt/config.properties | cut -d'=' -f2)) - @echo "Using bucket: $(E2B_EXECUTE_FILE_BUCKET)" - aws s3 cp bin/template-manager s3://$(E2B_EXECUTE_FILE_BUCKET)/template-manager - - -.PHONY: test -test: - go test -v ./... 
- -.PHONY: test-build -test-build: - sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) AWS_ACCOUNT_ID=$(AWS_ACCOUNT_ID) AWS_ECR_REPOSITORY=$(AWS_ECR_REPOSITORY) AWS_REGION=$(AWS_REGION) go run -race -gcflags=all="-N -l" main.go -test build -template d6a5c9wp4ccm7uqi4jzi -build 8e00bbdf-7f55-4025-9964-eede203c6ee5 - -.PHONY: test-delete -test-delete: - sudo TEMPLATE_BUCKET_NAME=$(TEMPLATE_BUCKET_NAME) AWS_ACCOUNT_ID=$(AWS_ACCOUNT_ID) AWS_ECR_REPOSITORY=$(AWS_ECR_REPOSITORY) AWS_REGION=$(AWS_REGION) go run -race -gcflags=all="-N -l" main.go -test delete -env 0v0c9frk1etrhpxr5ljw - -.PHONY: migrate -migrate: - ./upload-envs.sh /mnt/disks/fc-envs/v1 $(TEMPLATE_BUCKET_NAME) diff --git a/packages/template-manager/go.mod b/packages/template-manager/go.mod deleted file mode 100644 index 1d5d78f..0000000 --- a/packages/template-manager/go.mod +++ /dev/null @@ -1,140 +0,0 @@ -module github.com/e2b-dev/infra/packages/template-manager - -go 1.24.0 - -toolchain go1.24.2 - -require ( - github.com/Microsoft/hcsshim v0.12.9 - github.com/aws/aws-sdk-go v1.51.7 - github.com/docker/docker v27.3.1+incompatible - github.com/e2b-dev/infra/packages/shared v0.0.0 - github.com/firecracker-microvm/firecracker-go-sdk v1.0.0 - github.com/fsouza/go-dockerclient v1.12.0 - github.com/go-openapi/strfmt v0.23.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 - github.com/opencontainers/image-spec v1.1.0 - github.com/rs/zerolog v1.33.0 - github.com/vishvananda/netlink v1.3.0 - github.com/vishvananda/netns v0.0.5 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 - go.uber.org/zap v1.27.0 - google.golang.org/grpc v1.68.0 - google.golang.org/protobuf v1.35.2 -) - -require ( - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect - github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // 
indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect - github.com/aws/aws-sdk-go-v2/config v1.29.9 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect - github.com/aws/smithy-go v1.22.2 // indirect - github.com/bits-and-blooms/bitset v1.17.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect - github.com/containerd/fifo v1.1.0 // indirect - github.com/containerd/log v0.1.0 // indirect - github.com/containernetworking/cni v1.2.3 // indirect - github.com/containernetworking/plugins v1.6.0 // indirect - github.com/dchest/uniuri v1.2.0 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/go-connections v0.5.0 // indirect - github.com/docker/go-units v0.5.0 // 
indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.23.0 // indirect - github.com/go-openapi/errors v0.22.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.22.0 // indirect - github.com/go-openapi/runtime v0.28.0 // indirect - github.com/go-openapi/spec v0.21.0 // indirect - github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-openapi/validate v0.24.0 // indirect - github.com/gofrs/uuid v3.3.0+incompatible // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/patternmatcher v0.6.0 // indirect - github.com/moby/sys/sequential v0.6.0 // indirect - github.com/moby/sys/user v0.3.0 // indirect - github.com/moby/sys/userns v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/oklog/ulid v1.3.1 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - 
github.com/sirupsen/logrus v1.9.3 // indirect - go.mongodb.org/mongo-driver v1.17.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sync v0.14.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/term v0.32.0 // indirect - golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.27.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace github.com/e2b-dev/infra/packages/shared v0.0.0 => ../shared - -// Fix for deprecated and unavailable dependencies -replace github.com/mitchellh/osext => golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 - -replace github.com/gin-contrib/cors => github.com/gin-contrib/cors v1.3.1 - -replace github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.7.7 - -replace github.com/firecracker-microvm/firecracker-go-sdk => github.com/firecracker-microvm/firecracker-go-sdk v0.22.0 - -replace github.com/containernetworking/cni => github.com/containernetworking/cni v0.8.1 - -replace github.com/chenzhuoyu/iasm => github.com/chenzhuoyu/iasm v0.0.0-20241031125313-4cba78b3538b diff 
--git a/packages/template-manager/go.sum b/packages/template-manager/go.sum deleted file mode 100644 index 19772c2..0000000 --- a/packages/template-manager/go.sum +++ /dev/null @@ -1,623 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= -github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod 
h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= -github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= -github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= -github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= -github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= 
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66 h1:MTLivtC3s89de7Fe3P8rzML/8XPNRfuyJhlRTsCEt0k= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.66/go.mod h1:NAuQ2s6gaFEsuTIb2+P5t6amB1w5MhvJFxppoezGWH0= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod 
h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4= -github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/containerd/cgroups/v3 v3.0.4 
h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= -github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containernetworking/cni v0.8.1 h1:7zpDnQ3T3s4ucOuJ/ZCLrYBxzkg0AELFfII3Epo9TmI= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.7/go.mod h1:R7lXeZaBzpfqapcAbHRW8/CYwm0dHzbz0XEjofx0uB0= -github.com/containernetworking/plugins v1.6.0 h1:lrsUrLF7QODLx6gncHOqk/pnCiC7c6bvDAskV4KUifQ= -github.com/containernetworking/plugins v1.6.0/go.mod h1:rYLQWMJz/dYuW1XhHdc9xuzdkgbkWEEjwOhUm84+288= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= 
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dchest/uniuri v1.2.0 h1:koIcOUdrTIivZgSLhHQvKgqdWZq5d7KdMEWF1Ud6+5g= -github.com/dchest/uniuri v1.2.0/go.mod h1:fSzm4SLHzNZvWLvWJew423PhAzkpNQYq+uNLq4kxhkY= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/firecracker-microvm/firecracker-go-sdk v0.22.0 h1:hk28AO5ArAX9iHomi6axNLK+6+8gz1wi3ooNsUTlSFQ= -github.com/firecracker-microvm/firecracker-go-sdk v0.22.0/go.mod h1:lr7w/zmzIi72h+dDMQsRmmKS63EKvnFPEpg2KrjX2X0= -github.com/fsouza/go-dockerclient v1.12.0 h1:S2f2crEUbBNCFiF06kR/GvioEB8EMsb3Td/bpawD+aU= -github.com/fsouza/go-dockerclient v1.12.0/go.mod h1:YWUtjg8japrqD/80L98nTtCoxQFp5B5wrSsnyeB5lFo= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod 
h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= -github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= -github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod 
h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= -github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= -github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.22/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= -github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod 
h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= -github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= -github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.23.0 
h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.11/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= -github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= -github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny 
v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= -github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf 
v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= -github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.17.11 
h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmLKZf+SjVanKKhCgf3bg+511DmU9eDQTen7LLbY= -github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= -github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= -github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= -github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= -github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= -github.com/moby/term v0.5.0 
h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b h1:Ey6yH0acn50T/v6CB75bGP4EMJqnv9WvnjN7oZaj+xE= -github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go 
v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= -github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sirupsen/logrus v1.0.6/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c h1:gqEdF4VwBu3lTKGHS9rXE9x1/pEaSwCXRLOZRF6qtlw= -github.com/sparrc/go-ping v0.0.0-20190613174326-4e5b6552494c/go.mod h1:eMyUVp6f/5jnzM+3zahzl7q6UXLbgSc3MKg/+ow9QW0= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify 
v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= -github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= -github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM= -go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0 h1:qtFISDHKolvIxzSs0gIaiPUPR0Cucb0F2coHC7ZLdps= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.57.0/go.mod h1:Y+Pop1Q6hCOnETWTW4NROK/q1hv50hM7yDaUTjG8lp8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ= -google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697 h1:LWZqQOEjDyONlF1H6afSWpAL/znlREo2tHfLoe+8LMA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= 
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0= -google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/packages/template-manager/internal/build/network_linux.go b/packages/template-manager/internal/build/network_linux.go deleted file mode 100644 index a212549..0000000 --- a/packages/template-manager/internal/build/network_linux.go +++ 
/dev/null @@ -1,186 +0,0 @@ -//go:build linux -// +build linux - -package build - -import ( - "context" - "fmt" - "net" - "runtime" - - "github.com/vishvananda/netlink" - "github.com/vishvananda/netns" - "go.opentelemetry.io/otel/trace" - - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" -) - - -const ( - fcTapAddress = "169.254.0.22" - fcTapMask = "30" - fcTapName = "tap0" - namespaceNamePrefix = "fc-env-" -) - -var fcTapCIDR = fmt.Sprintf("%s/%s", fcTapAddress, fcTapMask) - -type FCNetwork struct { - namespaceID string -} - -func NewFCNetwork(ctx context.Context, tracer trace.Tracer, env *Env) (*FCNetwork, error) { - childCtx, childSpan := tracer.Start(ctx, "new-fc-network") - defer childSpan.End() - - network := &FCNetwork{ - namespaceID: namespaceNamePrefix + env.BuildId, - } - - err := network.setup(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error setting up network: %w", err) - - network.Cleanup(childCtx, tracer) - - return nil, errMsg - } - - return network, err -} - -func (n *FCNetwork) setup(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "setup") - defer childSpan.End() - - // Prevent thread changes so the we can safely manipulate with namespaces - telemetry.ReportEvent(childCtx, "waiting for OS thread lock") - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - telemetry.ReportEvent(childCtx, "OS thread lock passed") - - // Save the original (host) namespace and restore it upon function exit - hostNS, err := netns.Get() - if err != nil { - errMsg := fmt.Errorf("cannot get current (host) namespace: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "saved original ns") - - defer func() { - netErr := netns.Set(hostNS) - if netErr != nil { - errMsg := fmt.Errorf("error resetting network namespace back to the host namespace: %w", netErr) - telemetry.ReportError(childCtx, errMsg) - } else { - 
telemetry.ReportEvent(childCtx, "reset network namespace back to the host namespace") - } - - netErr = hostNS.Close() - if netErr != nil { - errMsg := fmt.Errorf("error closing host network namespace: %w", netErr) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed host network namespace") - } - }() - - // Create namespace - ns, err := netns.NewNamed(n.namespaceID) - if err != nil { - errMsg := fmt.Errorf("cannot create new namespace: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created ns") - - defer func() { - nsErr := ns.Close() - if nsErr != nil { - errMsg := fmt.Errorf("error closing namespace: %w", nsErr) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed namespace") - } - }() - - // Create tap device - tapAttrs := netlink.NewLinkAttrs() - tapAttrs.Name = fcTapName - tapAttrs.Namespace = ns - - tap := &netlink.Tuntap{ - Mode: netlink.TUNTAP_MODE_TAP, - LinkAttrs: tapAttrs, - } - - err = netlink.LinkAdd(tap) - if err != nil { - errMsg := fmt.Errorf("error creating tap device: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created tap device") - - // Active tap device - err = netlink.LinkSetUp(tap) - if err != nil { - errMsg := fmt.Errorf("error setting tap device up: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set tap device up") - - // Add ip address to tap device - ip, ipNet, err := net.ParseCIDR(fcTapCIDR) - if err != nil { - errMsg := fmt.Errorf("error parsing tap CIDR: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "parsed CIDR") - - err = netlink.AddrAdd(tap, &netlink.Addr{ - IPNet: &net.IPNet{ - IP: ip, - Mask: ipNet.Mask, - }, - }) - if err != nil { - errMsg := fmt.Errorf("error 
setting address of the tap device: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set tap device address") - - return nil -} - -func (n *FCNetwork) Cleanup(ctx context.Context, tracer trace.Tracer) { - childCtx, childSpan := tracer.Start(ctx, "cleanup") - defer childSpan.End() - - err := netns.DeleteNamed(n.namespaceID) - if err != nil { - errMsg := fmt.Errorf("error deleting namespace: %w", err) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "deleted namespace") - } -} diff --git a/packages/template-manager/internal/build/network_other.go b/packages/template-manager/internal/build/network_other.go deleted file mode 100644 index fd13872..0000000 --- a/packages/template-manager/internal/build/network_other.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !linux -// +build !linux - -package build - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel/trace" -) - -type FCNetwork struct { - namespaceID string -} - -// Cleanup is a no-op for non-Linux systems -func (n *FCNetwork) Cleanup(ctx context.Context, tracer trace.Tracer) { -} - -// NewFCNetwork returns an error -func NewFCNetwork(ctx context.Context, tracer trace.Tracer, env *Env) (*FCNetwork, error) { - return nil, fmt.Errorf("network functionality is only supported on Linux") -} diff --git a/packages/template-manager/internal/build/provision.sh b/packages/template-manager/internal/build/provision.sh deleted file mode 100644 index 76b1114..0000000 --- a/packages/template-manager/internal/build/provision.sh +++ /dev/null @@ -1,141 +0,0 @@ -export BASH_XTRACEFD=1 -set -euxo pipefail - -echo "Starting provisioning script." 
- -echo "ENV_ID={{ .EnvID }}" >/.e2b -echo "BUILD_ID={{ .BuildID }}" >>/.e2b - -# We are downloading the packages manually -apt-get update --download-only -DEBIAN_FRONTEND=noninteractive DEBCONF_NOWARNINGS=yes apt-get install -y openssh-server sudo systemd socat chrony linuxptp iptables - -# Set up autologin. -mkdir -p /etc/systemd/system/serial-getty@ttyS0.service.d -cat </etc/systemd/system/serial-getty@ttyS0.service.d/autologin.conf -[Service] -ExecStart= -ExecStart=-/sbin/agetty --noissue --autologin root %I 115200,38400,9600 vt102 -EOF - -# Add swapfile — we enable it in the preexec for envd -mkdir /swap -fallocate -l 128M /swap/swapfile -chmod 600 /swap/swapfile -mkswap /swap/swapfile - -# Set up envd service. -mkdir -p /etc/systemd/system - -# Set up e2bd service. -cat </etc/systemd/system/envd.service -[Unit] -Description=Env Daemon Service - -[Service] -Type=simple -Restart=always -User=root -Group=root -Environment=GOTRACEBACK=all -LimitCORE=infinity -ExecStart=/bin/bash -l -c "/usr/bin/envd -cmd '{{ .StartCmd }}'" -OOMPolicy=continue -OOMScoreAdjust=-1000 -Environment="GOMEMLIMIT={{ .MemoryLimit }}MiB" - -ExecStartPre=/bin/bash -c 'echo 0 > /proc/sys/vm/swappiness && swapon /swap/swapfile' - -[Install] -WantedBy=multi-user.target -EOF - -# Set up chrony. -mkdir -p /etc/chrony -cat </etc/chrony/chrony.conf -refclock PHC /dev/ptp0 poll -1 dpoll -1 offset 0 trust prefer -makestep 1 -1 -EOF - -# Add a proxy config, as some environments expects it there (e.g. timemaster in Node Dockerimage) -echo "include /etc/chrony/chrony.conf" >/etc/chrony.conf - -mkdir -p /etc/systemd/system/chrony.service.d -# The ExecStart= should be emptying the ExecStart= line in config. -cat </etc/systemd/system/chrony.service.d/override.conf -[Service] -ExecStart= -ExecStart=/usr/sbin/chronyd -User=root -Group=root -EOF - -# Enable systemd services -# Because this script runs in a container we can't use `systemctl`. -# Containers don't run init daemons. 
We have to enable the runner service manually. -mkdir -p /etc/systemd/system/multi-user.target.wants -ln -s /etc/systemd/system/envd.service /etc/systemd/system/multi-user.target.wants/envd.service - -# Set up shell. -echo "export SHELL='/bin/bash'" >/etc/profile.d/shell.sh -echo "export PS1='\w \$ '" >/etc/profile.d/prompt.sh -echo "export PS1='\w \$ '" >>"/etc/profile" -echo "export PS1='\w \$ '" >>"/root/.bashrc" - -# Use .bashrc and .profile -echo "if [ -f ~/.bashrc ]; then source ~/.bashrc; fi; if [ -f ~/.profile ]; then source ~/.profile; fi" >>/etc/profile - -# Set up SSH. -mkdir -p /etc/ssh -cat <>/etc/ssh/sshd_config -PermitRootLogin yes -PermitEmptyPasswords yes -PasswordAuthentication yes -EOF - -# Remove password for root. -passwd -d root - -# Create default user. -adduser --disabled-password --gecos "" user -usermod -aG sudo user -passwd -d user -echo "user ALL=(ALL:ALL) NOPASSWD: ALL" >>/etc/sudoers - -mkdir -p /code -mkdir -p /home/user - -chmod 777 -R /home/user -chmod 777 -R /usr/local -chmod 777 -R /code - -# TODO: Right now the chown line has no effect in the FC, even though it correctly changes the owner here. -# It may be because of the way we are starting the FC VM? - -# Add DNS. 
-echo "nameserver 8.8.8.8" >/etc/resolv.conf - -# Start systemd services -systemctl enable envd -systemctl enable chrony 2>&1 - -cat </etc/systemd/system/forward_ports.service -[Unit] -Description=Forward Ports Service - -[Service] -Type=simple -Restart=no -User=root -Group=root -ExecStart=/bin/bash -l -c "(echo 1 | tee /proc/sys/net/ipv4/ip_forward) && iptables-legacy -t nat -A POSTROUTING -s 127.0.0.1 -j SNAT --to-source {{ .FcAddress }} && iptables-legacy -t nat -A PREROUTING -d {{ .FcAddress }} -j DNAT --to-destination 127.0.0.1" - -[Install] -WantedBy=multi-user.target -EOF - - - -# systemctl enable forward_ports - -echo "Finished provisioning script" diff --git a/packages/template-manager/internal/build/rootfs.go b/packages/template-manager/internal/build/rootfs.go deleted file mode 100644 index 31ccff2..0000000 --- a/packages/template-manager/internal/build/rootfs.go +++ /dev/null @@ -1,679 +0,0 @@ -package build - -import ( - "archive/tar" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ecr" - "io" - "math" - "os" - "os/exec" - "strings" - "time" - - "github.com/Microsoft/hcsshim/ext4/tar2ext4" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/client" - docker "github.com/fsouza/go-dockerclient" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/e2b-dev/infra/packages/shared/pkg/consts" - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" -) - -const ( - ToMBShift = 20 - // Max size of the rootfs file in MB. 
- maxRootfsSize = 15000 << ToMBShift - cacheTimeout = "48h" -) - -// authConfig will be populated at runtime with ECR credentials -var authConfig registry.AuthConfig - -type Rootfs struct { - client *client.Client - legacyClient *docker.Client - - env *Env -} - -type MultiWriter struct { - writers []io.Writer -} - -func (mw *MultiWriter) Write(p []byte) (int, error) { - for _, writer := range mw.writers { - _, err := writer.Write(p) - if err != nil { - return 0, err - } - } - - return len(p), nil -} - -func NewRootfs(ctx context.Context, tracer trace.Tracer, env *Env, docker *client.Client, legacyDocker *docker.Client) (*Rootfs, error) { - childCtx, childSpan := tracer.Start(ctx, "new-rootfs") - defer childSpan.End() - - rootfs := &Rootfs{ - client: docker, - legacyClient: legacyDocker, - env: env, - } - - _, _ = env.BuildLogsWriter.Write([]byte("Pulling Docker image...\n")) - err := rootfs.pullDockerImage(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error building docker image: %w", err) - - rootfs.cleanupDockerImage(childCtx, tracer) - - return nil, errMsg - } - _, _ = env.BuildLogsWriter.Write([]byte("Pulled Docker image.\n\n")) - - err = rootfs.createRootfsFile(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error creating rootfs file: %w", err) - - rootfs.cleanupDockerImage(childCtx, tracer) - - return nil, errMsg - } - - return rootfs, nil -} - -func (r *Rootfs) pullDockerImage(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "pull-docker-image") - defer childSpan.End() - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(consts.AWSRegion), - }) - if err != nil { - return fmt.Errorf("error creating AWS session: %w", err) - } - - // 创建 ECR 客户端 - ecrClient := ecr.New(sess) - - // 获取授权令牌 - input := &ecr.GetAuthorizationTokenInput{} - result, err := ecrClient.GetAuthorizationToken(input) - if err != nil { - return fmt.Errorf("error getting ECR auth token: %w", err) - } - - // 
处理授权数据 - if len(result.AuthorizationData) == 0 { - return fmt.Errorf("no authorization data returned") - } - - authData := result.AuthorizationData[0] - token := *authData.AuthorizationToken - - // 解码 Base64 令牌 - decodedToken, err := base64.StdEncoding.DecodeString(token) - if err != nil { - return fmt.Errorf("error decoding auth token: %w", err) - } - - // 分割用户名和密码 - parts := strings.SplitN(string(decodedToken), ":", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid auth token format") - } - // 设置认证配置 - authConfig = registry.AuthConfig{ - Username: parts[0], - Password: parts[1], - } - // Get AWS ECR authorization token - //cmd := exec.CommandContext(childCtx, "aws", "ecr", "get-login-password", "--region", consts.AWSRegion) - //password, err := cmd.Output() - //if err != nil { - // errMsg := fmt.Errorf("error getting ECR auth token: %w", err) - // telemetry.ReportCriticalError(childCtx, errMsg) - // return errMsg - //} - - // Set up auth config with ECR credentials - //authConfig = registry.AuthConfig{ - // Username: "AWS", - // Password: strings.TrimSpace(string(password)), - //} - authConfigBytes, err := json.Marshal(authConfig) - if err != nil { - errMsg := fmt.Errorf("error marshaling auth config: %w", err) - return errMsg - } - - authConfigBase64 := base64.URLEncoding.EncodeToString(authConfigBytes) - - logs, err := r.client.ImagePull(childCtx, r.dockerTag(), image.PullOptions{ - RegistryAuth: authConfigBase64, - Platform: "linux/amd64", - }) - if err != nil { - errMsg := fmt.Errorf("error pulling image: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - return errMsg - } - - _, err = io.Copy(os.Stdout, logs) - if err != nil { - errMsg := fmt.Errorf("error copying logs: %w", err) - telemetry.ReportError(childCtx, errMsg) - return errMsg - } - - err = logs.Close() - if err != nil { - errMsg := fmt.Errorf("error closing logs: %w", err) - telemetry.ReportError(childCtx, errMsg) - return errMsg - } - - telemetry.ReportEvent(childCtx, "pulled 
image") - return nil -} - -func (r *Rootfs) cleanupDockerImage(ctx context.Context, tracer trace.Tracer) { - childCtx, childSpan := tracer.Start(ctx, "cleanup-docker-image") - defer childSpan.End() - - _, err := r.client.ImageRemove(childCtx, r.dockerTag(), image.RemoveOptions{ - Force: false, - PruneChildren: false, - }) - if err != nil { - errMsg := fmt.Errorf("error removing image: %w", err) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "removed image") - } -} - -func (r *Rootfs) dockerTag() string { - return fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s/%s:%s", consts.AWSAccountID, consts.AWSRegion, consts.ECRRepository, r.env.TemplateId, r.env.BuildId) -} - -type PostProcessor struct { - errChan chan error - ctx context.Context - writer io.Writer -} - -// Start starts the post-processing. -func (p *PostProcessor) Start() { - - now := time.Now() - for { - msg := []byte(fmt.Sprintf("Postprocessing (%s) \r", time.Since(now).Round(time.Second))) - - select { - case postprocessingErr := <-p.errChan: - if postprocessingErr != nil { - p.writer.Write([]byte(fmt.Sprintf("Postprocessing failed: %s\n", postprocessingErr))) - - return - } - - p.writer.Write(msg) - p.writer.Write([]byte("Postprocessing finished. 
\n")) - - return - case <-p.ctx.Done(): - return - case <-time.After(100 * time.Millisecond): - p.writer.Write(msg) - } - } - -} - -func (p *PostProcessor) stop(err error) { - p.errChan <- err -} - -func NewPostProcessor(ctx context.Context, writer io.Writer) *PostProcessor { - return &PostProcessor{ - ctx: ctx, - writer: writer, - errChan: make(chan error), - } -} - -func (r *Rootfs) createRootfsFile(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "create-rootfs-file") - defer childSpan.End() - - var err error - PostProcessor := NewPostProcessor(childCtx, r.env.BuildLogsWriter) - go PostProcessor.Start() - defer PostProcessor.stop(err) - - var scriptDef bytes.Buffer - - err = EnvInstanceTemplate.Execute(&scriptDef, struct { - EnvID string - BuildID string - StartCmd string - FcAddress string - MemoryLimit int - }{ - FcAddress: fcAddr, - EnvID: r.env.TemplateId, - BuildID: r.env.BuildId, - StartCmd: strings.ReplaceAll(r.env.StartCmd, "'", "\\'"), - MemoryLimit: int(math.Min(float64(r.env.MemoryMB)/2, 512)), - }) - if err != nil { - errMsg := fmt.Errorf("error executing provision script: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "executed provision script env") - - if err != nil { - errMsg := fmt.Errorf("error generating network name: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created network") - - pidsLimit := int64(200) - - cont, err := r.client.ContainerCreate(childCtx, &container.Config{ - Image: r.dockerTag(), - Entrypoint: []string{"/bin/bash", "-c"}, - User: "root", - Cmd: []string{scriptDef.String()}, - Tty: false, - AttachStdout: true, - AttachStderr: true, - }, &container.HostConfig{ - SecurityOpt: []string{"no-new-privileges"}, - CapAdd: []string{"CHOWN", "DAC_OVERRIDE", "FSETID", "FOWNER", "SETGID", "SETUID", "NET_RAW", "SYS_CHROOT"}, - CapDrop: 
[]string{"ALL"}, - // TODO: Network mode is causing problems with /etc/hosts - we want to find a way to fix this and enable network mode again - // NetworkMode: container.NetworkMode(network.ID), - Resources: container.Resources{ - Memory: r.env.MemoryMB << ToMBShift, - CPUPeriod: 100000, - CPUQuota: r.env.VCpuCount * 100000, - MemorySwap: r.env.MemoryMB << ToMBShift, - PidsLimit: &pidsLimit, - }, - }, nil, &v1.Platform{}, "") - if err != nil { - errMsg := fmt.Errorf("error creating container: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created container") - - defer func() { - go func() { - cleanupContext, cleanupSpan := tracer.Start( - trace.ContextWithSpanContext(context.Background(), childSpan.SpanContext()), - "cleanup-container", - ) - defer cleanupSpan.End() - - removeErr := r.legacyClient.RemoveContainer(docker.RemoveContainerOptions{ - ID: cont.ID, - RemoveVolumes: true, - Force: true, - Context: cleanupContext, - }) - if removeErr != nil { - errMsg := fmt.Errorf("error removing container: %w", removeErr) - telemetry.ReportError(cleanupContext, errMsg) - } else { - telemetry.ReportEvent(cleanupContext, "removed container") - } - - // Move pruning to separate goroutine - cacheTimeoutArg := filters.Arg("until", cacheTimeout) - - _, pruneErr := r.client.BuildCachePrune(cleanupContext, types.BuildCachePruneOptions{ - Filters: filters.NewArgs(cacheTimeoutArg), - All: true, - }) - if pruneErr != nil { - errMsg := fmt.Errorf("error pruning build cache: %w", pruneErr) - telemetry.ReportError(cleanupContext, errMsg) - } else { - telemetry.ReportEvent(cleanupContext, "pruned build cache") - } - - _, pruneErr = r.client.ImagesPrune(cleanupContext, filters.NewArgs(cacheTimeoutArg)) - if pruneErr != nil { - errMsg := fmt.Errorf("error pruning images: %w", pruneErr) - telemetry.ReportError(cleanupContext, errMsg) - } else { - telemetry.ReportEvent(cleanupContext, "pruned images") - } - - _, 
pruneErr = r.client.ContainersPrune(cleanupContext, filters.NewArgs(cacheTimeoutArg)) - if pruneErr != nil { - errMsg := fmt.Errorf("error pruning containers: %w", pruneErr) - telemetry.ReportError(cleanupContext, errMsg) - } else { - telemetry.ReportEvent(cleanupContext, "pruned containers") - } - }() - }() - - filesToTar := []fileToTar{ - { - localPath: storage.HostOldEnvdPath, - tarPath: storage.GuestOldEnvdPath, - }, - { - localPath: storage.HostEnvdPath, - tarPath: storage.GuestEnvdPath, - }, - } - - pr, pw := io.Pipe() - - go func() { - defer func() { - closeErr := pw.Close() - if closeErr != nil { - errMsg := fmt.Errorf("error closing pipe: %w", closeErr) - telemetry.ReportCriticalError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed pipe") - } - }() - - tw := tar.NewWriter(pw) - defer func() { - err = tw.Close() - if err != nil { - errMsg := fmt.Errorf("error closing tar writer: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed tar writer") - } - }() - - for _, file := range filesToTar { - addErr := addFileToTarWriter(tw, file) - if addErr != nil { - errMsg := fmt.Errorf("error adding envd to tar writer: %w", addErr) - telemetry.ReportCriticalError(childCtx, errMsg) - - return - } else { - telemetry.ReportEvent(childCtx, "added envd to tar writer") - } - } - }() - - // Copy tar to the container - err = r.legacyClient.UploadToContainer(cont.ID, docker.UploadToContainerOptions{ - InputStream: pr, - Path: "/", - Context: childCtx, - NoOverwriteDirNonDir: false, - }) - if err != nil { - errMsg := fmt.Errorf("error copying envd to container: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "copied envd to container") - - err = r.client.ContainerStart(childCtx, cont.ID, container.StartOptions{}) - if err != nil { - errMsg := fmt.Errorf("error starting container: %w", err) - 
telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "started container") - - go func() { - anonymousChildCtx, anonymousChildSpan := tracer.Start(childCtx, "handle-container-logs", trace.WithSpanKind(trace.SpanKindConsumer)) - defer anonymousChildSpan.End() - - containerStdoutWriter := telemetry.NewEventWriter(anonymousChildCtx, "stdout") - containerStderrWriter := telemetry.NewEventWriter(anonymousChildCtx, "stderr") - - writer := &MultiWriter{ - writers: []io.Writer{containerStderrWriter, r.env.BuildLogsWriter}, - } - - logsErr := r.legacyClient.Logs(docker.LogsOptions{ - Stdout: true, - Stderr: true, - RawTerminal: false, - OutputStream: containerStdoutWriter, - ErrorStream: writer, - Context: childCtx, - Container: cont.ID, - Follow: true, - Timestamps: false, - }) - if logsErr != nil { - errMsg := fmt.Errorf("error getting container logs: %w", logsErr) - telemetry.ReportError(anonymousChildCtx, errMsg) - } else { - telemetry.ReportEvent(anonymousChildCtx, "setup container logs") - } - }() - - wait, errWait := r.client.ContainerWait(childCtx, cont.ID, container.WaitConditionNotRunning) - select { - case <-childCtx.Done(): - errMsg := fmt.Errorf("error waiting for container: %w", childCtx.Err()) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - case waitErr := <-errWait: - if waitErr != nil { - errMsg := fmt.Errorf("error waiting for container: %w", waitErr) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - case response := <-wait: - if response.Error != nil { - errMsg := fmt.Errorf("error waiting for container - code %d: %s", response.StatusCode, response.Error.Message) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - } - - telemetry.ReportEvent(childCtx, "waited for container exit") - - inspection, err := r.client.ContainerInspect(ctx, cont.ID) - if err != nil { - errMsg := fmt.Errorf("error inspecting container: %w", err) - 
telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "inspected container") - - if inspection.State.Running { - errMsg := fmt.Errorf("container is still running") - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - if inspection.State.ExitCode != 0 { - errMsg := fmt.Errorf("container exited with status %d: %s", inspection.State.ExitCode, inspection.State.Error) - telemetry.ReportCriticalError( - childCtx, - errMsg, - attribute.Int("exit_code", inspection.State.ExitCode), - attribute.String("error", inspection.State.Error), - attribute.Bool("oom", inspection.State.OOMKilled), - ) - - return errMsg - } - - rootfsFile, err := os.Create(r.env.BuildRootfsPath()) - if err != nil { - errMsg := fmt.Errorf("error creating rootfs file: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created rootfs file") - - defer func() { - rootfsErr := rootfsFile.Close() - if rootfsErr != nil { - errMsg := fmt.Errorf("error closing rootfs file: %w", rootfsErr) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed rootfs file") - } - }() - - pr, pw = io.Pipe() - - go func() { - downloadErr := r.legacyClient.DownloadFromContainer(cont.ID, docker.DownloadFromContainerOptions{ - Context: childCtx, - Path: "/", - OutputStream: pw, - }) - if downloadErr != nil { - errMsg := fmt.Errorf("error downloading from container: %w", downloadErr) - telemetry.ReportCriticalError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "downloaded from container") - } - - closeErr := pw.Close() - if closeErr != nil { - errMsg := fmt.Errorf("error closing pipe: %w", closeErr) - telemetry.ReportCriticalError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "closed pipe") - } - }() - - telemetry.ReportEvent(childCtx, "coverting tar to ext4") - - // This package creates a read-only ext4 filesystem from a 
tar archive. - // We need to use another program to make the filesystem writable. - err = tar2ext4.ConvertTarToExt4(pr, rootfsFile, tar2ext4.MaximumDiskSize(maxRootfsSize)) - if err != nil { - if strings.Contains(err.Error(), "disk exceeded maximum size") { - r.env.BuildLogsWriter.Write([]byte(fmt.Sprintf("Build failed - exceeded maximum size %v MB.\n", maxRootfsSize>>ToMBShift))) - } - - errMsg := fmt.Errorf("error converting tar to ext4: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "converted container tar to ext4") - - tuneContext, tuneSpan := tracer.Start(childCtx, "tune-rootfs-file-cmd") - defer tuneSpan.End() - - cmd := exec.CommandContext(tuneContext, "tune2fs", "-O", "^read-only", r.env.BuildRootfsPath()) - - tuneStdoutWriter := telemetry.NewEventWriter(tuneContext, "stdout") - cmd.Stdout = tuneStdoutWriter - - tuneStderrWriter := telemetry.NewEventWriter(childCtx, "stderr") - cmd.Stderr = tuneStderrWriter - - err = cmd.Run() - if err != nil { - errMsg := fmt.Errorf("error making rootfs file writable: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "made rootfs file writable") - - rootfsStats, err := rootfsFile.Stat() - if err != nil { - errMsg := fmt.Errorf("error statting rootfs file: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "statted rootfs file") - - // In bytes - rootfsSize := rootfsStats.Size() + r.env.DiskSizeMB<= timeout { - return fmt.Errorf("timeout reached while waiting for socket file") - } - - // Wait for a short duration before checking again - time.Sleep(socketReadyCheckInterval) - } else { - // Error occurred while checking for socket file - return err - } - } -} - -func newFirecrackerClient(socketPath string) *client.Firecracker { - httpClient := client.NewHTTPClient(strfmt.NewFormats()) - - transport := 
firecracker.NewUnixSocketTransport(socketPath, nil, false) - httpClient.SetTransport(transport) - - return httpClient -} - -func NewSnapshot(ctx context.Context, tracer trace.Tracer, env *Env, network *FCNetwork, rootfs *Rootfs) (*Snapshot, error) { - childCtx, childSpan := tracer.Start(ctx, "new-snapshot") - defer childSpan.End() - - socketFileName := fmt.Sprintf("fc-sock-%s.sock", env.BuildId) - socketPath := filepath.Join(tmpDirPath, socketFileName) - - client := newFirecrackerClient(socketPath) - - telemetry.ReportEvent(childCtx, "created fc client") - - snapshot := &Snapshot{ - socketPath: socketPath, - client: client, - env: env, - fc: nil, - } - - defer snapshot.cleanupFC(childCtx, tracer) - - err := snapshot.startFCProcess( - childCtx, - tracer, - env.FirecrackerPath(), - network.namespaceID, - storage.KernelMountDir, - env.CacheKernelDir(), - ) - if err != nil { - errMsg := fmt.Errorf("error starting fc process: %w", err) - - return nil, errMsg - } - - telemetry.ReportEvent(childCtx, "started fc process") - - err = snapshot.configureFC(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error configuring fc: %w", err) - - return nil, errMsg - } - - telemetry.ReportEvent(childCtx, "configured fc") - - // Wait for all necessary things in FC to start - // TODO: Maybe init should signalize when it's ready? - time.Sleep(waitTimeForFCStart) - telemetry.ReportEvent(childCtx, "waited for fc to start", attribute.Float64("seconds", float64(waitTimeForFCStart/time.Second))) - - if env.StartCmd != "" { - // HACK: This is a temporary fix for a customer that needs a bigger time to start the command. - // TODO: Remove this after we can add customizable wait time for building templates. 
- if env.TemplateId == "zegbt9dl3l2ixqem82mm" || env.TemplateId == "ot5bidkk3j2so2j02uuz" { - time.Sleep(120 * time.Second) - } else { - time.Sleep(waitTimeForStartCmd) - } - telemetry.ReportEvent(childCtx, "waited for start command", attribute.Float64("seconds", float64(waitTimeForStartCmd/time.Second))) - } - - err = snapshot.pauseFC(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error pausing fc: %w", err) - - return nil, errMsg - } - - err = snapshot.snapshotFC(childCtx, tracer) - if err != nil { - errMsg := fmt.Errorf("error snapshotting fc: %w", err) - - return nil, errMsg - } - - return snapshot, nil -} - -func (s *Snapshot) startFCProcess( - ctx context.Context, - tracer trace.Tracer, - fcBinaryPath, - networkNamespaceID, - kernelMountDir, - kernelDirPath string, -) error { - childCtx, childSpan := tracer.Start(ctx, "start-fc-process") - defer childSpan.End() - kernelMountCmd := fmt.Sprintf( - "mount --bind %s %s && ", - kernelDirPath, - kernelMountDir, - ) - inNetNSCmd := fmt.Sprintf("ip netns exec %s ", networkNamespaceID) - fcCmd := fmt.Sprintf("%s --api-sock %s", fcBinaryPath, s.socketPath) - - s.fc = exec.CommandContext(childCtx, "unshare", "-pm", "--kill-child", "--", "bash", "-c", kernelMountCmd+inNetNSCmd+fcCmd) - - fcVMStdoutWriter := telemetry.NewEventWriter(childCtx, "stdout") - fcVMStderrWriter := telemetry.NewEventWriter(childCtx, "stderr") - - stdoutPipe, err := s.fc.StdoutPipe() - if err != nil { - errMsg := fmt.Errorf("error creating fc stdout pipe: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - stderrPipe, err := s.fc.StderrPipe() - if err != nil { - errMsg := fmt.Errorf("error creating fc stderr pipe: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - closeErr := stdoutPipe.Close() - if closeErr != nil { - closeErrMsg := fmt.Errorf("error closing fc stdout pipe: %w", closeErr) - telemetry.ReportError(childCtx, closeErrMsg) - } - - return errMsg - } - - var outputWaitGroup 
sync.WaitGroup - - outputWaitGroup.Add(1) - go func() { - scanner := bufio.NewScanner(stdoutPipe) - - for scanner.Scan() { - line := scanner.Text() - fcVMStdoutWriter.Write([]byte(line)) - } - - outputWaitGroup.Done() - }() - - outputWaitGroup.Add(1) - go func() { - scanner := bufio.NewScanner(stderrPipe) - - for scanner.Scan() { - line := scanner.Text() - fcVMStderrWriter.Write([]byte(line)) - } - - outputWaitGroup.Done() - }() - - err = s.fc.Start() - if err != nil { - errMsg := fmt.Errorf("error starting fc process: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "started fc process") - - go func() { - anonymousChildCtx, anonymousChildSpan := tracer.Start(ctx, "handle-fc-process-wait") - defer anonymousChildSpan.End() - - outputWaitGroup.Wait() - - waitErr := s.fc.Wait() - if err != nil { - errMsg := fmt.Errorf("error waiting for fc process: %w", waitErr) - telemetry.ReportError(anonymousChildCtx, errMsg) - } else { - telemetry.ReportEvent(anonymousChildCtx, "fc process exited") - } - }() - - // Wait for the FC process to start so we can use FC API - err = waitForSocket(s.socketPath, socketWaitTimeout) - if err != nil { - errMsg := fmt.Errorf("error waiting for fc socket: %w", err) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "fc process created socket") - - return nil -} - -func (s *Snapshot) configureFC(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "configure-fc") - defer childSpan.End() - - ip := fmt.Sprintf("%s::%s:%s:instance:eth0:off:8.8.8.8", fcAddr, fcTapAddress, fcMaskLong) - kernelArgs := fmt.Sprintf("quiet loglevel=1 ip=%s reboot=k panic=1 pci=off nomodules i8042.nokbd i8042.noaux ipv6.disable=1 random.trust_cpu=on", ip) - kernelImagePath := storage.KernelMountedPath - bootSourceConfig := operations.PutGuestBootSourceParams{ - Context: childCtx, - Body: &models.BootSource{ - BootArgs: kernelArgs, - KernelImagePath: 
&kernelImagePath, - }, - } - - _, err := s.client.Operations.PutGuestBootSource(&bootSourceConfig) - if err != nil { - errMsg := fmt.Errorf("error setting fc boot source config: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set fc boot source config") - - rootfs := "rootfs" - ioEngine := "Async" - isRootDevice := true - isReadOnly := false - pathOnHost := s.env.BuildRootfsPath() - driversConfig := operations.PutGuestDriveByIDParams{ - Context: childCtx, - DriveID: rootfs, - Body: &models.Drive{ - DriveID: &rootfs, - PathOnHost: pathOnHost, - IsRootDevice: &isRootDevice, - IsReadOnly: isReadOnly, - IoEngine: &ioEngine, - }, - } - - _, err = s.client.Operations.PutGuestDriveByID(&driversConfig) - if err != nil { - errMsg := fmt.Errorf("error setting fc drivers config: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set fc drivers config") - - ifaceID := fcIfaceID - hostDevName := fcTapName - networkConfig := operations.PutGuestNetworkInterfaceByIDParams{ - Context: childCtx, - IfaceID: ifaceID, - Body: &models.NetworkInterface{ - IfaceID: &ifaceID, - GuestMac: fcMacAddress, - HostDevName: &hostDevName, - }, - } - - _, err = s.client.Operations.PutGuestNetworkInterfaceByID(&networkConfig) - if err != nil { - errMsg := fmt.Errorf("error setting fc network config: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set fc network config") - - smt := true - trackDirtyPages := false - - machineConfig := &models.MachineConfiguration{ - VcpuCount: &s.env.VCpuCount, - MemSizeMib: &s.env.MemoryMB, - Smt: &smt, - TrackDirtyPages: &trackDirtyPages, - } - - if s.env.Hugepages() { - machineConfig.HugePages = models.MachineConfigurationHugePagesNr2M - } - - machineConfigParams := operations.PutMachineConfigurationParams{ - Context: childCtx, - Body: machineConfig, - } - - 
_, err = s.client.Operations.PutMachineConfiguration(&machineConfigParams) - if err != nil { - errMsg := fmt.Errorf("error setting fc machine config: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set fc machine config") - - mmdsVersion := "V2" - mmdsConfig := operations.PutMmdsConfigParams{ - Context: childCtx, - Body: &models.MmdsConfig{ - Version: &mmdsVersion, - NetworkInterfaces: []string{fcIfaceID}, - }, - } - - _, err = s.client.Operations.PutMmdsConfig(&mmdsConfig) - if err != nil { - errMsg := fmt.Errorf("error setting fc mmds config: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "set fc mmds config") - - // We may need to sleep before start - previous configuration is processes asynchronously. How to do this sync or in one go? - time.Sleep(waitTimeForFCConfig) - - start := models.InstanceActionInfoActionTypeInstanceStart - startActionParams := operations.CreateSyncActionParams{ - Context: childCtx, - Info: &models.InstanceActionInfo{ - ActionType: &start, - }, - } - - _, err = s.client.Operations.CreateSyncAction(&startActionParams) - if err != nil { - errMsg := fmt.Errorf("error starting fc: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "started fc") - - return nil -} - -func (s *Snapshot) pauseFC(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "pause-fc") - defer childSpan.End() - - state := models.VMStatePaused - pauseConfig := operations.PatchVMParams{ - Context: childCtx, - Body: &models.VM{ - State: &state, - }, - } - - _, err := s.client.Operations.PatchVM(&pauseConfig) - if err != nil { - errMsg := fmt.Errorf("error pausing vm: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "paused fc") - - return nil -} - -func (s 
*Snapshot) snapshotFC(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "snapshot-fc") - defer childSpan.End() - - memfilePath := s.env.BuildMemfilePath() - snapfilePath := s.env.BuildSnapfilePath() - snapshotConfig := operations.CreateSnapshotParams{ - Context: childCtx, - Body: &models.SnapshotCreateParams{ - SnapshotType: models.SnapshotCreateParamsSnapshotTypeFull, - MemFilePath: &memfilePath, - SnapshotPath: &snapfilePath, - }, - } - - _, err := s.client.Operations.CreateSnapshot(&snapshotConfig) - if err != nil { - errMsg := fmt.Errorf("error creating vm snapshot: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "created vm snapshot") - - return nil -} - -func (s *Snapshot) cleanupFC(ctx context.Context, tracer trace.Tracer) { - childCtx, childSpan := tracer.Start(ctx, "cleanup-fc") - defer childSpan.End() - - if s.fc != nil { - err := s.fc.Cancel() - if err != nil { - errMsg := fmt.Errorf("error killing fc process: %w", err) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "killed fc process") - } - } - - err := os.RemoveAll(s.socketPath) - if err != nil { - errMsg := fmt.Errorf("error removing fc socket %w", err) - telemetry.ReportError(childCtx, errMsg) - } else { - telemetry.ReportEvent(childCtx, "removed fc socket") - } -} diff --git a/packages/template-manager/internal/build/snapshot_other.go b/packages/template-manager/internal/build/snapshot_other.go deleted file mode 100644 index 9f03cff..0000000 --- a/packages/template-manager/internal/build/snapshot_other.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !linux -// +build !linux - -package build - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/trace" -) - -var fcAddr = "127.0.0.1:5150" - -type Snapshot struct { -} - -func NewSnapshot(ctx context.Context, tracer trace.Tracer, env *Env, network *FCNetwork, rootfs *Rootfs) (*Snapshot, error) 
{ - return nil, errors.New("snapshot is not supported on this platform") -} diff --git a/packages/template-manager/internal/build/tar.go b/packages/template-manager/internal/build/tar.go deleted file mode 100644 index b135243..0000000 --- a/packages/template-manager/internal/build/tar.go +++ /dev/null @@ -1,59 +0,0 @@ -package build - -import ( - "archive/tar" - "fmt" - "io" - "os" -) - -func addFileToTarWriter(writer *tar.Writer, file fileToTar) error { - f, err := os.Open(file.localPath) - if err != nil { - errMsg := fmt.Errorf("error opening file: %w", err) - - return errMsg - } - - defer func() { - closeErr := f.Close() - if closeErr != nil { - errMsg := fmt.Errorf("error closing file: %w", closeErr) - fmt.Print(errMsg) - } - }() - - stat, err := f.Stat() - if err != nil { - errMsg := fmt.Errorf("error statting file: %w", err) - - return errMsg - } - - hdr := &tar.Header{ - Name: file.tarPath, // The name of the file in the tar archive - Mode: 0o777, - Size: stat.Size(), - } - - err = writer.WriteHeader(hdr) - if err != nil { - errMsg := fmt.Errorf("error writing tar header: %w", err) - - return errMsg - } - - _, err = io.Copy(writer, f) - if err != nil { - errMsg := fmt.Errorf("error copying file to tar: %w", err) - - return errMsg - } - - return nil -} - -type fileToTar struct { - localPath string - tarPath string -} diff --git a/packages/template-manager/internal/build/template.go b/packages/template-manager/internal/build/template.go deleted file mode 100644 index bf391e5..0000000 --- a/packages/template-manager/internal/build/template.go +++ /dev/null @@ -1,106 +0,0 @@ -package build - -import ( - "context" - _ "embed" - "fmt" - "io" - "os" - "text/template" - - "github.com/docker/docker/client" - docker "github.com/fsouza/go-dockerclient" - "go.opentelemetry.io/otel/trace" - - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" -) - -type Env struct { - *storage.TemplateFiles - - // Command to 
run when building the env. - StartCmd string - - // The number of vCPUs to allocate to the VM. - VCpuCount int64 - - // The amount of RAM memory to allocate to the VM, in MiB. - MemoryMB int64 - - // The amount of free disk to allocate to the VM, in MiB. - DiskSizeMB int64 - - // Path to the directory where the temporary files for the build are stored. - BuildLogsWriter io.Writer - - // Real size of the rootfs after building the env. - rootfsSize int64 -} - -//go:embed provision.sh -var provisionEnvScriptFile string -var EnvInstanceTemplate = template.Must(template.New("provisioning-script").Parse(provisionEnvScriptFile)) - -// Real size in MB of rootfs after building the env -func (e *Env) RootfsSizeMB() int64 { - return e.rootfsSize >> 20 -} - -func (e *Env) Build(ctx context.Context, tracer trace.Tracer, docker *client.Client, legacyDocker *docker.Client) error { - childCtx, childSpan := tracer.Start(ctx, "build") - defer childSpan.End() - - err := os.MkdirAll(e.BuildDir(), 0o777) - if err != nil { - errMsg := fmt.Errorf("error initializing directories for building env '%s' during build '%s': %w", e.TemplateId, e.BuildId, err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - rootfs, err := NewRootfs(childCtx, tracer, e, docker, legacyDocker) - if err != nil { - errMsg := fmt.Errorf("error creating rootfs for env '%s' during build '%s': %w", e.TemplateId, e.BuildId, err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - network, err := NewFCNetwork(childCtx, tracer, e) - if err != nil { - errMsg := fmt.Errorf("error network setup for FC while building env '%s' during build '%s': %w", e.TemplateId, e.BuildId, err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - defer network.Cleanup(childCtx, tracer) - - _, err = NewSnapshot(childCtx, tracer, e, network, rootfs) - if err != nil { - errMsg := fmt.Errorf("error snapshot for env '%s' during build '%s': %w", e.TemplateId, e.BuildId, err) 
- telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - return nil -} - -func (e *Env) Remove(ctx context.Context, tracer trace.Tracer) error { - childCtx, childSpan := tracer.Start(ctx, "move-to-env-dir") - defer childSpan.End() - - err := os.RemoveAll(e.BuildDir()) - if err != nil { - errMsg := fmt.Errorf("error removing build dir: %w", err) - telemetry.ReportCriticalError(childCtx, errMsg) - - return errMsg - } - - telemetry.ReportEvent(childCtx, "removed build dir") - - return nil -} diff --git a/packages/template-manager/internal/build/writer/writer.go b/packages/template-manager/internal/build/writer/writer.go deleted file mode 100644 index b2f5c38..0000000 --- a/packages/template-manager/internal/build/writer/writer.go +++ /dev/null @@ -1,26 +0,0 @@ -package writer - -import ( - "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" -) - -type BuildLogsWriter struct { - stream template_manager.TemplateService_TemplateCreateServer -} - -func (w BuildLogsWriter) Write(p []byte) (n int, err error) { - err = w.stream.Send(&template_manager.TemplateBuildLog{Log: string(p)}) - if err != nil { - return 0, err - } - - return len(p), nil -} - -func New(stream template_manager.TemplateService_TemplateCreateServer) BuildLogsWriter { - writer := BuildLogsWriter{ - stream: stream, - } - - return writer -} diff --git a/packages/template-manager/internal/constants/main.go b/packages/template-manager/internal/constants/main.go deleted file mode 100644 index 95c2116..0000000 --- a/packages/template-manager/internal/constants/main.go +++ /dev/null @@ -1,29 +0,0 @@ -package constants - -import ( - "fmt" - "github.com/e2b-dev/infra/packages/shared/pkg/consts" - "strings" -) - -func CheckRequired() error { - var missing []string - - if consts.AWSAccountID == "" { - missing = append(missing, "AWS_ACCOUNT_ID") - } - - if consts.ECRRepository == "" { - missing = append(missing, "AWS_ECR_REPOSITORY") - } - - if consts.AWSRegion == "" { - missing = 
append(missing, "AWS_REGION") - } - - if len(missing) > 0 { - return fmt.Errorf("missing environment variables: %s", strings.Join(missing, ", ")) - } - - return nil -} diff --git a/packages/template-manager/internal/constants/service.go b/packages/template-manager/internal/constants/service.go deleted file mode 100644 index c97fdc0..0000000 --- a/packages/template-manager/internal/constants/service.go +++ /dev/null @@ -1,3 +0,0 @@ -package constants - -const ServiceName = "template-manager" diff --git a/packages/template-manager/internal/server/create_template.go b/packages/template-manager/internal/server/create_template.go deleted file mode 100644 index 7346b97..0000000 --- a/packages/template-manager/internal/server/create_template.go +++ /dev/null @@ -1,134 +0,0 @@ -package server - -import ( - "context" - "fmt" - "os/exec" - "strconv" - "strings" - "time" - - "go.opentelemetry.io/otel/attribute" - "google.golang.org/grpc/metadata" - - template_manager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" - "github.com/e2b-dev/infra/packages/template-manager/internal/build" - "github.com/e2b-dev/infra/packages/template-manager/internal/build/writer" -) - -const cleanupTimeout = time.Second * 10 - -func (s *serverStore) TemplateCreate(templateRequest *template_manager.TemplateCreateRequest, stream template_manager.TemplateService_TemplateCreateServer) error { - ctx := stream.Context() - - childCtx, childSpan := s.tracer.Start(ctx, "template-create") - defer childSpan.End() - - config := templateRequest.Template - - childSpan.SetAttributes( - attribute.String("env.id", config.TemplateID), - attribute.String("env.build.id", config.BuildID), - attribute.String("env.kernel.version", config.KernelVersion), - attribute.String("env.firecracker.version", config.FirecrackerVersion), - attribute.String("env.start_cmd", config.StartCommand), - 
attribute.Int64("env.memory_mb", int64(config.MemoryMB)), - attribute.Int64("env.vcpu_count", int64(config.VCpuCount)), - attribute.Bool("env.huge_pages", config.HugePages), - ) - - logsWriter := writer.New(stream) - template := &build.Env{ - TemplateFiles: storage.NewTemplateFiles( - config.TemplateID, - config.BuildID, - config.KernelVersion, - config.FirecrackerVersion, - config.HugePages, - ), - VCpuCount: int64(config.VCpuCount), - MemoryMB: int64(config.MemoryMB), - StartCmd: config.StartCommand, - DiskSizeMB: int64(config.DiskSizeMB), - BuildLogsWriter: logsWriter, - } - - buildStorage := s.templateStorage.NewBuild(template.TemplateFiles) - - var err error - - // Remove local template files if build fails - defer func() { - removeCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) - defer cancel() - - removeErr := template.Remove(removeCtx, s.tracer) - if removeErr != nil { - telemetry.ReportError(childCtx, removeErr) - } - }() - - err = template.Build(childCtx, s.tracer, s.dockerClient, s.legacyDockerClient) - if err != nil { - _, _ = logsWriter.Write([]byte(fmt.Sprintf("Error building environment: %v", err))) - - telemetry.ReportCriticalError(childCtx, err) - - return err - } - - // Remove build files if build fails or times out - defer func() { - if err != nil { - removeCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) - defer cancel() - - removeErr := buildStorage.Remove(removeCtx) - if removeErr != nil { - telemetry.ReportError(childCtx, removeErr) - } - } - }() - - memfilePath := template.BuildMemfilePath() - rootfsPath := template.BuildRootfsPath() - - upload := buildStorage.Upload( - childCtx, - template.BuildSnapfilePath(), - &memfilePath, - &rootfsPath, - ) - - // nosemgrep dangerous-exec-command - cmd := exec.Command(storage.HostEnvdPath, "-version") - - out, err := cmd.Output() - if err != nil { - _, _ = logsWriter.Write([]byte(fmt.Sprintf("Error while getting envd version: %v", err))) - - return err - 
} - - uploadErr := <-upload - if uploadErr != nil { - errMsg := fmt.Sprintf("Error while uploading build files: %v", uploadErr) - _, _ = logsWriter.Write([]byte(errMsg)) - - return uploadErr - } - - version := strings.TrimSpace(string(out)) - trailerMetadata := metadata.Pairs( - storage.RootfsSizeKey, strconv.FormatInt(template.RootfsSizeMB(), 10), - storage.EnvdVersionKey, version, - ) - - stream.SetTrailer(trailerMetadata) - - telemetry.ReportEvent(childCtx, "Environment built") - - return nil -} diff --git a/packages/template-manager/internal/server/delete_template.go b/packages/template-manager/internal/server/delete_template.go deleted file mode 100644 index 627f145..0000000 --- a/packages/template-manager/internal/server/delete_template.go +++ /dev/null @@ -1,22 +0,0 @@ -package server - -import ( - "context" - - "google.golang.org/protobuf/types/known/emptypb" - - template_manager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" - "github.com/e2b-dev/infra/packages/template-manager/internal/template" -) - -func (s *serverStore) TemplateBuildDelete(ctx context.Context, in *template_manager.TemplateBuildDeleteRequest) (*emptypb.Empty, error) { - childCtx, childSpan := s.tracer.Start(ctx, "template-delete-request") - defer childSpan.End() - - err := template.Delete(childCtx, s.tracer, s.ecrClient, s.templateStorage, in.BuildID, in.TemplateID) - if err != nil { - return nil, err - } - - return &emptypb.Empty{}, nil -} diff --git a/packages/template-manager/internal/server/main.go b/packages/template-manager/internal/server/main.go deleted file mode 100644 index e73b7b1..0000000 --- a/packages/template-manager/internal/server/main.go +++ /dev/null @@ -1,94 +0,0 @@ -package server - -import ( - "context" - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ecr" - "github.com/docker/docker/client" - docker "github.com/fsouza/go-dockerclient" - grpc_zap 
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" - "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/recovery" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/keepalive" - - e2bgrpc "github.com/e2b-dev/infra/packages/shared/pkg/grpc" - templatemanager "github.com/e2b-dev/infra/packages/shared/pkg/grpc/template-manager" - "github.com/e2b-dev/infra/packages/shared/pkg/logging" - "github.com/e2b-dev/infra/packages/shared/pkg/consts" - "github.com/e2b-dev/infra/packages/template-manager/internal/constants" - "github.com/e2b-dev/infra/packages/template-manager/internal/template" -) - -type serverStore struct { - templatemanager.UnimplementedTemplateServiceServer - server *grpc.Server - tracer trace.Tracer - dockerClient *client.Client - legacyDockerClient *docker.Client - ecrClient *ecr.ECR - templateStorage *template.Storage -} - -func New(logger *zap.Logger) *grpc.Server { - ctx := context.Background() - log.Println("Initializing template manager") - - opts := []grpc_zap.Option{logging.WithoutHealthCheck()} - - s := grpc.NewServer( - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: 5 * time.Second, // Minimum time between pings from client - PermitWithoutStream: true, // Allow pings even when no active streams - }), - grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: 15 * time.Second, // Server sends keepalive pings every 15s - Timeout: 5 * time.Second, // Wait 5s for response before considering dead - }), - grpc.StatsHandler(e2bgrpc.NewStatsWrapper(otelgrpc.NewServerHandler())), - grpc.ChainUnaryInterceptor( - grpc_zap.UnaryServerInterceptor(logger, opts...), - recovery.UnaryServerInterceptor(), - ), - ) - dockerClient, err := client.NewClientWithOpts(client.FromEnv) - 
if err != nil { - panic(err) - } - - legacyClient, err := docker.NewClientFromEnv() - if err != nil { - panic(err) - } - - // Create AWS session and ECR client - awsSession, err := session.NewSession(&aws.Config{ - Region: aws.String(consts.AWSRegion), - }) - if err != nil { - panic(err) - } - ecrClient := ecr.New(awsSession) - - templateStorage := template.NewStorage(ctx) - - templatemanager.RegisterTemplateServiceServer(s, &serverStore{ - tracer: otel.Tracer(constants.ServiceName), - dockerClient: dockerClient, - legacyDockerClient: legacyClient, - ecrClient: ecrClient, - templateStorage: templateStorage, - }) - - grpc_health_v1.RegisterHealthServer(s, health.NewServer()) - return s -} \ No newline at end of file diff --git a/packages/template-manager/internal/template/main.go b/packages/template-manager/internal/template/main.go deleted file mode 100644 index 46acbbb..0000000 --- a/packages/template-manager/internal/template/main.go +++ /dev/null @@ -1,74 +0,0 @@ -package template - -import ( - "context" - "fmt" - "log" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecr" - "go.opentelemetry.io/otel/trace" - - "github.com/e2b-dev/infra/packages/shared/pkg/consts" - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" -) - -func GetDockerImageTag(templateID string) string { - // Return the ECR image tag - return templateID -} - -func GetDockerImageRepository(templateID string) string { - // Return the ECR repository URI - return fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s/%s", consts.AWSAccountID, consts.AWSRegion, consts.ECRRepository, templateID) - //return fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s:", consts.AWSAccountID, consts.AWSRegion, consts.ECRRepository) -} - -// GetDockerRepositoryName 返回完整的 ECR 仓库名称 -func GetDockerRepositoryName(templateID string) string { - // 根据截图和您提供的信息,正确的格式是 "e2b-custom-environments/templateID" - return fmt.Sprintf("%s/%s", consts.ECRRepository, templateID) -} - -func Delete( - ctx context.Context, - 
tracer trace.Tracer, - ecrClient *ecr.ECR, - templateStorage *Storage, - buildId string, - templateID string, -) error { - childCtx, childSpan := tracer.Start(ctx, "delete-template") - defer childSpan.End() - - err := templateStorage.Remove(ctx, buildId) - if err != nil { - return fmt.Errorf("error when deleting template objects: %w", err) - } - - // 获取完整的仓库名称 - repositoryName := GetDockerRepositoryName(templateID) - - // 打印调试信息 - log.Printf("Attempting to delete image with tag '%s' from repository '%s'", buildId, repositoryName) - - _, ecrDeleteErr := ecrClient.BatchDeleteImage(&ecr.BatchDeleteImageInput{ - RepositoryName: aws.String(repositoryName), - ImageIds: []*ecr.ImageIdentifier{ - { - ImageTag: aws.String(buildId), - }, - }, - }) - - if ecrDeleteErr != nil { - errMsg := fmt.Errorf("error when deleting template image from registry: %w", ecrDeleteErr) - telemetry.ReportCriticalError(childCtx, errMsg) - log.Printf("error deleting template image from ECR: %v", ecrDeleteErr) - } else { - telemetry.ReportEvent(childCtx, "deleted template image from registry") - log.Printf("successfully deleted template image %s from ECR", buildId) - } - - return nil -} diff --git a/packages/template-manager/internal/template/storage.go b/packages/template-manager/internal/template/storage.go deleted file mode 100644 index a2c038b..0000000 --- a/packages/template-manager/internal/template/storage.go +++ /dev/null @@ -1,32 +0,0 @@ -package template - -import ( - "context" - "fmt" - - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/shared/pkg/storage/s3" -) - -type Storage struct { - bucket *s3.BucketHandle -} - -func NewStorage(ctx context.Context) *Storage { - return &Storage{ - bucket: s3.GetTemplateBucket(), - } -} - -func (t *Storage) Remove(ctx context.Context, buildId string) error { - err := s3.RemoveDir(ctx, t.bucket, buildId) - if err != nil { - return fmt.Errorf("error when removing template '%s': %w", buildId, err) - } - - 
return nil -} - -func (t *Storage) NewBuild(files *storage.TemplateFiles) *storage.TemplateBuild { - return storage.NewTemplateBuild(nil, nil, files) -} diff --git a/packages/template-manager/internal/test/build.go b/packages/template-manager/internal/test/build.go deleted file mode 100644 index 1b130e6..0000000 --- a/packages/template-manager/internal/test/build.go +++ /dev/null @@ -1,79 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "os" - "time" - - "github.com/docker/docker/client" - docker "github.com/fsouza/go-dockerclient" - "github.com/rs/zerolog/log" - "go.opentelemetry.io/otel" - - "github.com/e2b-dev/infra/packages/shared/pkg/storage" - "github.com/e2b-dev/infra/packages/template-manager/internal/build" - "github.com/e2b-dev/infra/packages/template-manager/internal/template" -) - -func Build(templateID, buildID string) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute*3) - defer cancel() - - tracer := otel.Tracer("test") - - dockerClient, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - legacyClient, err := docker.NewClientFromEnv() - if err != nil { - panic(err) - } - - var buf bytes.Buffer - t := build.Env{ - TemplateFiles: storage.NewTemplateFiles( - templateID, - buildID, - "vmlinux-5.10.186", - "v1.7.0-dev_8bb88311", - true, - ), - VCpuCount: 2, - MemoryMB: 256, - StartCmd: "", - DiskSizeMB: 512, - BuildLogsWriter: &buf, - } - - err = t.Build(ctx, tracer, dockerClient, legacyClient) - if err != nil { - errMsg := fmt.Errorf("error building template: %w", err) - - fmt.Fprintln(os.Stderr, errMsg) - - return - } - - tempStorage := template.NewStorage(ctx) - - buildStorage := tempStorage.NewBuild(t.TemplateFiles) - - memfilePath := t.BuildMemfilePath() - rootfsPath := t.BuildRootfsPath() - - upload := buildStorage.Upload( - ctx, - t.BuildSnapfilePath(), - &memfilePath, - &rootfsPath, - ) - - err = <-upload - if err != nil { - log.Fatal().Err(err).Msg("error uploading build 
files") - } -} diff --git a/packages/template-manager/main.go b/packages/template-manager/main.go deleted file mode 100644 index b045454..0000000 --- a/packages/template-manager/main.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "log" - "net" - - "github.com/e2b-dev/infra/packages/shared/pkg/env" - "github.com/e2b-dev/infra/packages/shared/pkg/logging" - "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" - "github.com/e2b-dev/infra/packages/template-manager/internal/constants" - "github.com/e2b-dev/infra/packages/template-manager/internal/server" - "github.com/e2b-dev/infra/packages/template-manager/internal/test" -) - -const defaultPort = 5009 - -var commitSHA string - -func main() { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - testFlag := flag.String("test", "", "run tests") - templateID := flag.String("template", "", "template id") - buildID := flag.String("build", "", "build id") - - port := flag.Int("port", defaultPort, "Port for test HTTP server") - - log.Println("Starting template manager", "commit", commitSHA) - - flag.Parse() - - if err := constants.CheckRequired(); err != nil { - log.Fatalf("Validation for environment variables failed: %v", err) - } - - // If we're running a test, we don't need to start the server - if *testFlag != "" { - switch *testFlag { - case "build": - test.Build(*templateID, *buildID) - return - } - } - - if !env.IsLocal() { - shutdown := telemetry.InitOTLPExporter(ctx, constants.ServiceName, "no") - defer shutdown(context.TODO()) - } - - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - - logger, err := logging.New(env.IsLocal()) - if err != nil { - log.Fatalf("Error initializing logging\n: %v\n", err) - } - - // Create an instance of our handler which satisfies the generated interface - s := server.New(logger.Desugar()) - - log.Printf("Starting server on port %d", *port) - 
if err := s.Serve(lis); err != nil { - log.Fatalf("failed to serve: %v", err) - } -} diff --git a/packages/template-manager/template-manager.proto b/packages/template-manager/template-manager.proto deleted file mode 100644 index fb1c85b..0000000 --- a/packages/template-manager/template-manager.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -import "google/protobuf/empty.proto"; - -option go_package = "https://github.com/e2b-dev/infra/template-manager"; - - -message TemplateConfig { - string templateID = 1; - string buildID = 2; - - int32 memoryMB = 3; - int32 vCpuCount = 4; - int32 diskSizeMB = 5; - - string kernelVersion = 6; - string firecrackerVersion = 7; - string startCommand = 8; - bool hugePages = 9; -} - -message TemplateCreateRequest { - TemplateConfig template = 1; -} - -// Data required for deleting a template. -message TemplateBuildDeleteRequest { - string buildID = 1; - string templateID = 2; -} - -// Logs from template build -message TemplateBuildLog { - string log = 1; -} - -// Interface exported by the server. -service TemplateService { - // TemplateCreate is a gRPC service that creates a new template - rpc TemplateCreate (TemplateCreateRequest) returns (stream TemplateBuildLog); - // TemplateBuildDelete is a gRPC service that deletes files associated with a template build - rpc TemplateBuildDelete (TemplateBuildDeleteRequest) returns (google.protobuf.Empty); -} diff --git a/packages/template-manager/upload.sh b/packages/template-manager/upload.sh deleted file mode 100644 index 5a2d57b..0000000 --- a/packages/template-manager/upload.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -AWS_S3_BUCKET=$1 -AWS_REGION=${2:-us-east-1} - -chmod +x bin/template-manager - -aws s3 cp \ - --region ${AWS_REGION} \ - --cache-control "no-cache, max-age=0" \ - bin/template-manager "s3://${AWS_S3_BUCKET}/template-manager"