diff --git a/commands/compose.go b/commands/compose.go
index 8941eb3c..8c55235c 100644
--- a/commands/compose.go
+++ b/commands/compose.go
@@ -8,6 +8,8 @@ import (
 	"strings"
 
 	"github.com/docker/model-cli/desktop"
+	"github.com/docker/model-runner/pkg/inference/backends/llamacpp"
+	"github.com/docker/model-runner/pkg/inference/scheduling"
 	"github.com/spf13/cobra"
 )
 
@@ -26,6 +28,9 @@ func newComposeCmd() *cobra.Command {
 
 func newUpCommand() *cobra.Command {
 	var models []string
+	var ctxSize int64
+	var rawRuntimeFlags string
+	var backend string
 	c := &cobra.Command{
 		Use: "up",
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -35,6 +40,14 @@ func newUpCommand() *cobra.Command {
 				return err
 			}
 
+			sendInfo("Initializing model runner...")
+			if ctxSize != -1 {
+				sendInfo(fmt.Sprintf("Setting context size to %d", ctxSize))
+			}
+			if rawRuntimeFlags != "" {
+				sendInfo("Setting raw runtime flags to " + rawRuntimeFlags)
+			}
+
 			kind := modelRunner.EngineKind()
 			standalone, err := ensureStandaloneRunnerAvailable(cmd.Context(), nil)
 			if err != nil {
@@ -50,6 +63,19 @@ func newUpCommand() *cobra.Command {
 				return err
 			}
 
+			for _, model := range models {
+				if err := desktopClient.ConfigureBackend(scheduling.ConfigureRequest{
+					Model:           model,
+					ContextSize:     ctxSize,
+					RawRuntimeFlags: rawRuntimeFlags,
+				}); err != nil {
+					configErrFmtString := "failed to configure backend for model %s with context-size %d and runtime-flags %s"
+					_ = sendErrorf(configErrFmtString+": %v", model, ctxSize, rawRuntimeFlags, err)
+					return fmt.Errorf(configErrFmtString+": %w", model, ctxSize, rawRuntimeFlags, err)
+				}
+				sendInfo("Successfully configured backend for model " + model)
+			}
+
 			switch kind {
 			case desktop.ModelRunnerEngineKindDesktop:
 				_ = setenv("URL", "http://model-runner.docker.internal/engines/v1/")
@@ -66,6 +92,9 @@ func newUpCommand() *cobra.Command {
 		},
 	}
 	c.Flags().StringArrayVar(&models, "model", nil, "model to use")
+	c.Flags().Int64Var(&ctxSize, "context-size", -1, "context size for the model")
+	c.Flags().StringVar(&rawRuntimeFlags, "runtime-flags", "", "raw runtime flags to pass to the inference engine")
+	c.Flags().StringVar(&backend, "backend", llamacpp.Name, "inference backend to use")
 	return c
 }
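The hunk above threads three new `up` flags through to the scheduler. For orientation, here is a minimal, self-contained sketch of that flag wiring using only cobra — flag names and defaults are copied from the diff, while `sendInfo`, `sendErrorf`, and the desktop client are replaced with plain prints, so this is an illustration rather than the actual command:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var (
		models          []string
		ctxSize         int64
		rawRuntimeFlags string
		backend         string
	)
	up := &cobra.Command{
		Use: "up",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Stand-ins for the sendInfo/ConfigureBackend calls in the hunk.
			if ctxSize != -1 {
				fmt.Printf("Setting context size to %d\n", ctxSize)
			}
			if rawRuntimeFlags != "" {
				fmt.Println("Setting raw runtime flags to " + rawRuntimeFlags)
			}
			for _, model := range models {
				fmt.Printf("would configure %s backend for model %s\n", backend, model)
			}
			return nil
		},
	}
	up.Flags().StringArrayVar(&models, "model", nil, "model to use")
	up.Flags().Int64Var(&ctxSize, "context-size", -1, "context size for the model")
	up.Flags().StringVar(&rawRuntimeFlags, "runtime-flags", "", "raw runtime flags to pass to the inference engine")
	up.Flags().StringVar(&backend, "backend", "llama.cpp", "inference backend to use")
	if err := up.Execute(); err != nil {
		fmt.Println(err)
	}
}
```

Note that `-1` doubles as the "not set" sentinel for `--context-size`, so the informational message only fires when the user overrides it.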
diff --git a/desktop/desktop.go b/desktop/desktop.go
index 07e6e364..38740c2a 100644
--- a/desktop/desktop.go
+++ b/desktop/desktop.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/docker/model-runner/pkg/inference"
 	"github.com/docker/model-runner/pkg/inference/models"
+	"github.com/docker/model-runner/pkg/inference/scheduling"
 	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel"
 )
@@ -542,6 +543,30 @@ func (c *Client) Unload(req UnloadRequest) (UnloadResponse, error) {
 	return unloadResp, nil
 }
 
+func (c *Client) ConfigureBackend(request scheduling.ConfigureRequest) error {
+	configureBackendPath := inference.InferencePrefix + "/_configure"
+	jsonData, err := json.Marshal(request)
+	if err != nil {
+		return fmt.Errorf("error marshaling request: %w", err)
+	}
+
+	resp, err := c.doRequest(http.MethodPost, configureBackendPath, bytes.NewReader(jsonData))
+	if err != nil {
+		return c.handleQueryError(err, configureBackendPath)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusAccepted {
+		body, _ := io.ReadAll(resp.Body)
+		if resp.StatusCode == http.StatusConflict {
+			return fmt.Errorf("%s", body)
+		}
+		return fmt.Errorf("%s (%s)", body, resp.Status)
+	}
+
+	return nil
+}
+
 // doRequest is a helper function that performs HTTP requests and handles 503 responses
 func (c *Client) doRequest(method, path string, body io.Reader) (*http.Response, error) {
 	req, err := http.NewRequest(method, c.modelRunner.URL(path), body)
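`ConfigureBackend` marshals a `scheduling.ConfigureRequest`, POSTs it to the `_configure` endpoint, and treats only `202 Accepted` as success, with a `409 Conflict` reported using the response body alone. A small sketch of building that request body from the caller's side — the field names come from the hunks above, while the model name, the values, and the struct's exact JSON encoding (its tags are not shown in this diff) are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/model-runner/pkg/inference/scheduling"
)

func main() {
	// Field names taken from the ConfigureBackend/compose.go hunks above;
	// the model name and flag values are placeholders.
	req := scheduling.ConfigureRequest{
		Model:           "ai/example-model",
		ContextSize:     8192,
		RawRuntimeFlags: "--no-mmap",
	}
	payload, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```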
diff --git a/docs/reference/docker_model_compose_up.yaml b/docs/reference/docker_model_compose_up.yaml
index e377b2cf..7a746d11 100644
--- a/docs/reference/docker_model_compose_up.yaml
+++ b/docs/reference/docker_model_compose_up.yaml
@@ -3,6 +3,26 @@ usage: docker model compose up
 pname: docker model compose
 plink: docker_model_compose.yaml
 options:
+  - option: backend
+    value_type: string
+    default_value: llama.cpp
+    description: inference backend to use
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
+  - option: context-size
+    value_type: int64
+    default_value: "-1"
+    description: context size for the model
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
   - option: model
     value_type: stringArray
     default_value: '[]'
@@ -13,6 +33,15 @@ options:
     experimentalcli: false
     kubernetes: false
     swarm: false
+  - option: runtime-flags
+    value_type: string
+    description: raw runtime flags to pass to the inference engine
+    deprecated: false
+    hidden: false
+    experimental: false
+    experimentalcli: false
+    kubernetes: false
+    swarm: false
 inherited_options:
   - option: project-name
     value_type: string
diff --git a/go.mod b/go.mod
index a7672fb8..bf2071d3 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
 	github.com/docker/go-connections v0.5.0
 	github.com/docker/go-units v0.5.0
 	github.com/docker/model-distribution v0.0.0-20250512190053-b3792c042d57
-	github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88
+	github.com/docker/model-runner v0.0.0-20250613083629-6b8c3b816f00
 	github.com/google/go-containerregistry v0.20.3
 	github.com/nxadm/tail v1.4.8
 	github.com/olekukonko/tablewriter v0.0.5
@@ -23,15 +23,20 @@ require (
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
+	github.com/StackExchange/wmi v1.2.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bugsnag/panicwrap v1.3.4 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/containerd/containerd/v2 v2.0.4 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
+	github.com/containerd/platforms v1.0.0-rc.1 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 	github.com/creack/pty v1.1.24 // indirect
@@ -46,6 +51,7 @@ require (
 	github.com/fvbommel/sortorder v1.1.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-sql-driver/mysql v1.6.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/uuid v1.6.0 // indirect
@@ -54,15 +60,19 @@ require (
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
 	github.com/henvic/httpretty v0.1.4 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jaypipes/ghw v0.16.0 // indirect
+	github.com/jaypipes/pcidb v1.0.1 // indirect
 	github.com/jinzhu/gorm v1.9.16 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
 	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
+	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/sys/sequential v0.6.0 // indirect
 	github.com/moby/term v0.5.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -110,4 +120,5 @@ require (
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	gotest.tools/v3 v3.5.2 // indirect
+	howett.net/plist v1.0.0 // indirect
 )
diff --git a/go.sum b/go.sum
index e8894741..af44d257 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -5,10 +7,14 @@ github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0
 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
+github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
 github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
@@ -31,10 +37,26 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
+github.com/containerd/cgroups/v3 v3.0.3
h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= +github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= +github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= +github.com/containerd/containerd/v2 v2.0.4 h1:+r7yJMwhTfMm3CDyiBjMBQO8a9CTBxL2Bg/JtqtIwB8= +github.com/containerd/containerd/v2 v2.0.4/go.mod h1:5j9QUUaV/cy9ZeAx4S+8n9ffpf+iYnEj4jiExgcbuLY= +github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= +github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -73,8 +95,8 @@ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/model-distribution v0.0.0-20250512190053-b3792c042d57 h1:ZqfKknb+0/uJid8XLFwSl/osjE+WuS6o6I3dh3ZqO4U= github.com/docker/model-distribution v0.0.0-20250512190053-b3792c042d57/go.mod h1:dThpO9JoG5Px3i+rTluAeZcqLGw8C0qepuEL4gL2o/c= -github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88 h1:NkiizYL67HsCnnlEU6BQVoeiC1bAAyJFxw02bO7JC4E= -github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88/go.mod h1:Nw+rx6RRPNdProEb9/BVJyAQn63px6WWlOv+eEpkV7Q= +github.com/docker/model-runner v0.0.0-20250613083629-6b8c3b816f00 h1:ONUGj59kA60HPJBbVw0FL4+qOK7B1rjgqysA07uEr9g= +github.com/docker/model-runner v0.0.0-20250613083629-6b8c3b816f00/go.mod h1:DVUxuEYA5tqNPk8bk+RrgupuFRytnkNQQcItLAcCJM8= github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -93,6 +115,9 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -102,6 +127,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -133,6 +160,11 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jaypipes/ghw v0.16.0 h1:3HurCTS38VNpeQLo5fIdZsySuo/qAfpPSJ5t05QBFPM= +github.com/jaypipes/ghw v0.16.0/go.mod h1:In8SsaDqlb1oTyrbmTC14uy+fbBMvp+xdqX51MidlD8= +github.com/jaypipes/pcidb v1.0.1 h1:WB2zh27T3nwg8AE8ei81sNRb9yWBii3JGNJtT7K9Oic= +github.com/jaypipes/pcidb v1.0.1/go.mod h1:6xYUz/yYEyOkIkUt2t2J2folIuZ4Yg6uByCGFXMCeE4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o= github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= @@ -171,6 +203,8 @@ github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -184,8 +218,14 @@ github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4 h1:BpfhmL github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -288,6 +328,8 @@ github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23env github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= @@ -358,6 +400,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -411,6 +454,7 @@ gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v1 
v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -422,3 +466,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE b/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/README.md b/vendor/github.com/AdaLogics/go-fuzz-headers/README.md new file mode 100644 index 00000000..0a0d60c7 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/README.md @@ -0,0 +1,93 @@ +# go-fuzz-headers +This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers. + + +## Usage +Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine: + +```go +import ( + fuzz "github.com/AdaLogics/go-fuzz-headers" +) +data := []byte{'R', 'a', 'n', 'd', 'o', 'm'} +f := fuzz.NewConsumer(data) + +``` + +This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types. 
+ +After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples: + +### Structs +One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line: +```go +type Person struct { + Name string + Age int +} +p := Person{} +// Fill p with values based on the data provided by the fuzzing engine: +err := f.GenerateStruct(&p) +``` + +This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`: +```go +type PersonI struct { + Name string + Age int + BestFriend PersonII +} +type PersonII struct { + Name string + Age int +} +p := PersonI{} +err := f.GenerateStruct(&p) +``` + +If the consumer should insert values for unexported fields as well as exported, this can be enabled with: + +```go +f.AllowUnexportedFields() +``` + +...and disabled with: + +```go +f.DisallowUnexportedFields() +``` + +### Other types: + +Other useful APIs: + +```go +createdString, err := f.GetString() // Gets a string +createdInt, err := f.GetInt() // Gets an integer +createdByte, err := f.GetByte() // Gets a byte +createdBytes, err := f.GetBytes() // Gets a byte slice +createdBool, err := f.GetBool() // Gets a boolean +err := f.FuzzMap(target_map) // Fills a map +createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive +err := f.CreateFiles(inThisDir) // Fills inThisDir with files +createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength" +``` + +Most APIs are added as they are needed. + +## Projects that use go-fuzz-headers +- [runC](https://github.com/opencontainers/runc) +- [Istio](https://github.com/istio/istio) +- [Vitess](https://github.com/vitessio/vitess) +- [Containerd](https://github.com/containerd/containerd) + +Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it. + + + + +## Status +The project is under development and will be updated regularly. + +## References +go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz). \ No newline at end of file diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go new file mode 100644 index 00000000..361c9ac6 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -0,0 +1,960 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gofuzzheaders + +import ( + "archive/tar" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +var ( + MaxTotalLen uint32 = 2000000 + maxDepth = 100 +) + +func SetMaxTotalLen(newLen uint32) { + MaxTotalLen = newLen +} + +type ConsumeFuzzer struct { + data []byte + dataTotal uint32 + CommandPart []byte + RestOfArray []byte + NumberOfCalls int + position uint32 + fuzzUnexportedFields bool + forceUTF8Strings bool + curDepth int + Funcs map[reflect.Type]reflect.Value +} + +func IsDivisibleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func NewConsumer(fuzzData []byte) *ConsumeFuzzer { + return &ConsumeFuzzer{ + data: fuzzData, + dataTotal: uint32(len(fuzzData)), + Funcs: make(map[reflect.Type]reflect.Value), + curDepth: 0, + } +} + +func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error { + if f.dataTotal == 0 { + return errors.New("could not split") + } + numberOfCalls := int(f.data[0]) + if numberOfCalls < minCalls || numberOfCalls > maxCalls { + return errors.New("bad number of calls") + } + if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 { + return errors.New("length of data does not match required parameters") + } + + // Define part 2 and 3 of the data array + commandPart := f.data[1 : numberOfCalls+1] + restOfArray := f.data[numberOfCalls+1:] + + // Just a small check. It is necessary + if len(commandPart) != numberOfCalls { + return errors.New("length of commandPart does not match number of calls") + } + + // Check if restOfArray is divisible by numberOfCalls + if !IsDivisibleBy(len(restOfArray), numberOfCalls) { + return errors.New("length of commandPart does not match number of calls") + } + f.CommandPart = commandPart + f.RestOfArray = restOfArray + f.NumberOfCalls = numberOfCalls + return nil +} + +func (f *ConsumeFuzzer) AllowUnexportedFields() { + f.fuzzUnexportedFields = true +} + +func (f *ConsumeFuzzer) DisallowUnexportedFields() { + f.fuzzUnexportedFields = false +} + +func (f *ConsumeFuzzer) AllowNonUTF8Strings() { + f.forceUTF8Strings = false +} + +func (f *ConsumeFuzzer) DisallowNonUTF8Strings() { + f.forceUTF8Strings = true +} + +func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error { + e := reflect.ValueOf(targetStruct).Elem() + return f.fuzzStruct(e, false) +} + +func (f *ConsumeFuzzer) setCustom(v reflect.Value) error { + // First: see if we have a fuzz function for it. + doCustom, ok := f.Funcs[v.Type()] + if !ok { + return fmt.Errorf("could not find a custom function") + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return fmt.Errorf("could not use a custom function") + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return fmt.Errorf("could not use a custom function") + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return fmt.Errorf("could not use a custom function") + } + + verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + F: f, + })}) + + // check if we return an error + if verr[0].IsNil() { + return nil + } + return fmt.Errorf("could not use a custom function") +} + +func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error { + if f.curDepth >= maxDepth { + // return err or nil here? 
+ return nil + } + f.curDepth++ + defer func() { f.curDepth-- }() + + // We check if we should check for custom functions + if customFunctions && e.IsValid() && e.CanAddr() { + err := f.setCustom(e.Addr()) + if err != nil { + return err + } + } + + switch e.Kind() { + case reflect.Struct: + for i := 0; i < e.NumField(); i++ { + var v reflect.Value + if !e.Field(i).CanSet() { + if f.fuzzUnexportedFields { + v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem() + } + if err := f.fuzzStruct(v, customFunctions); err != nil { + return err + } + } else { + v = e.Field(i) + if err := f.fuzzStruct(v, customFunctions); err != nil { + return err + } + } + } + case reflect.String: + str, err := f.GetString() + if err != nil { + return err + } + if e.CanSet() { + e.SetString(str) + } + case reflect.Slice: + var maxElements uint32 + // Byte slices should not be restricted + if e.Type().String() == "[]uint8" { + maxElements = 10000000 + } else { + maxElements = 50 + } + + randQty, err := f.GetUint32() + if err != nil { + return err + } + numOfElements := randQty % maxElements + if (f.dataTotal - f.position) < numOfElements { + numOfElements = f.dataTotal - f.position + } + + uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements)) + + for i := 0; i < int(numOfElements); i++ { + // If we have more than 10, then we can proceed with that. + if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil { + if i >= 10 { + if e.CanSet() { + e.Set(uu) + } + return nil + } else { + return err + } + } + } + if e.CanSet() { + e.Set(uu) + } + case reflect.Uint: + newInt, err := f.GetUint() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Uint16: + newInt, err := f.GetUint16() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Uint32: + newInt, err := f.GetUint32() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Uint64: + newInt, err := f.GetInt() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(newInt)) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + newInt, err := f.GetInt() + if err != nil { + return err + } + if e.CanSet() { + e.SetInt(int64(newInt)) + } + case reflect.Float32: + newFloat, err := f.GetFloat32() + if err != nil { + return err + } + if e.CanSet() { + e.SetFloat(float64(newFloat)) + } + case reflect.Float64: + newFloat, err := f.GetFloat64() + if err != nil { + return err + } + if e.CanSet() { + e.SetFloat(float64(newFloat)) + } + case reflect.Map: + if e.CanSet() { + e.Set(reflect.MakeMap(e.Type())) + const maxElements = 50 + randQty, err := f.GetInt() + if err != nil { + return err + } + numOfElements := randQty % maxElements + for i := 0; i < numOfElements; i++ { + key := reflect.New(e.Type().Key()).Elem() + if err := f.fuzzStruct(key, customFunctions); err != nil { + return err + } + val := reflect.New(e.Type().Elem()).Elem() + if err = f.fuzzStruct(val, customFunctions); err != nil { + return err + } + e.SetMapIndex(key, val) + } + } + case reflect.Ptr: + if e.CanSet() { + e.Set(reflect.New(e.Type().Elem())) + if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil { + return err + } + return nil + } + case reflect.Uint8: + b, err := f.GetByte() + if err != nil { + return err + } + if e.CanSet() { + e.SetUint(uint64(b)) + } + case reflect.Bool: + b, err := f.GetBool() + if err != nil { + return err + } + if e.CanSet() { + 
e.SetBool(b) + } + } + return nil +} + +func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) { + // The max size of the array: + const max uint32 = 20 + + arraySize := f.position + if arraySize > max { + arraySize = max + } + stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize)) + if f.position+arraySize >= f.dataTotal { + return stringArray, errors.New("could not make string array") + } + + for i := 0; i < int(arraySize); i++ { + stringSize := uint32(f.data[f.position]) + if f.position+stringSize >= f.dataTotal { + return stringArray, nil + } + stringToAppend := string(f.data[f.position : f.position+stringSize]) + strVal := reflect.ValueOf(stringToAppend) + stringArray = reflect.Append(stringArray, strVal) + f.position += stringSize + } + return stringArray, nil +} + +func (f *ConsumeFuzzer) GetInt() (int, error) { + if f.position >= f.dataTotal { + return 0, errors.New("not enough bytes to create int") + } + returnInt := int(f.data[f.position]) + f.position++ + return returnInt, nil +} + +func (f *ConsumeFuzzer) GetByte() (byte, error) { + if f.position >= f.dataTotal { + return 0x00, errors.New("not enough bytes to get byte") + } + returnByte := f.data[f.position] + f.position++ + return returnByte, nil +} + +func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) { + if f.position >= f.dataTotal { + return nil, errors.New("not enough bytes to get byte") + } + returnBytes := make([]byte, 0, numberOfBytes) + for i := 0; i < numberOfBytes; i++ { + newByte, err := f.GetByte() + if err != nil { + return nil, err + } + returnBytes = append(returnBytes, newByte) + } + return returnBytes, nil +} + +func (f *ConsumeFuzzer) GetUint16() (uint16, error) { + u16, err := f.GetNBytes(2) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + return binary.LittleEndian.Uint16(u16), nil + } + return binary.BigEndian.Uint16(u16), nil +} + +func (f *ConsumeFuzzer) GetUint32() (uint32, error) { + u32, err := f.GetNBytes(4) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(u32), nil +} + +func (f *ConsumeFuzzer) GetUint64() (uint64, error) { + u64, err := f.GetNBytes(8) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + return binary.LittleEndian.Uint64(u64), nil + } + return binary.BigEndian.Uint64(u64), nil +} + +func (f *ConsumeFuzzer) GetUint() (uint, error) { + var zero uint + size := int(unsafe.Sizeof(zero)) + if size == 8 { + u64, err := f.GetUint64() + if err != nil { + return 0, err + } + return uint(u64), nil + } + u32, err := f.GetUint32() + if err != nil { + return 0, err + } + return uint(u32), nil +} + +func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { + var length uint32 + var err error + length, err = f.GetUint32() + if err != nil { + return nil, errors.New("not enough bytes to create byte array") + } + + if length == 0 { + length = 30 + } + bytesLeft := f.dataTotal - f.position + if bytesLeft <= 0 { + return nil, errors.New("not enough bytes to create byte array") + } + + // If the length is the same as bytes left, we will not overflow + // the remaining bytes. 
+ if length != bytesLeft { + length = length % bytesLeft + } + byteBegin := f.position + if byteBegin+length < byteBegin { + return nil, errors.New("numbers overflow") + } + f.position = byteBegin + length + return f.data[byteBegin:f.position], nil +} + +func (f *ConsumeFuzzer) GetString() (string, error) { + if f.position >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + length, err := f.GetUint32() + if err != nil { + return "nil", errors.New("not enough bytes to create string") + } + if f.position > MaxTotalLen { + return "nil", errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin+length > f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin > byteBegin+length { + return "nil", errors.New("numbers overflow") + } + f.position = byteBegin + length + s := string(f.data[byteBegin:f.position]) + if f.forceUTF8Strings { + s = strings.ToValidUTF8(s, "") + } + return s, nil +} + +func (f *ConsumeFuzzer) GetBool() (bool, error) { + if f.position >= f.dataTotal { + return false, errors.New("not enough bytes to create bool") + } + if IsDivisibleBy(int(f.data[f.position]), 2) { + f.position++ + return true, nil + } else { + f.position++ + return false, nil + } +} + +func (f *ConsumeFuzzer) FuzzMap(m interface{}) error { + return f.GenerateStruct(m) +} + +func returnTarBytes(buf []byte) ([]byte, error) { + return buf, nil + // Count files + var fileCounter int + tr := tar.NewReader(bytes.NewReader(buf)) + for { + _, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + fileCounter++ + } + if fileCounter >= 1 { + return buf, nil + } + return nil, fmt.Errorf("not enough files were created\n") +} + +func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error { + ind, err := f.GetInt() + if err != nil { + hdr.Format = tar.FormatGNU + //return nil + } + switch ind % 4 { + case 0: + hdr.Format = tar.FormatUnknown + case 1: + hdr.Format = tar.FormatUSTAR + case 2: + hdr.Format = tar.FormatPAX + case 3: + hdr.Format = tar.FormatGNU + } + return nil +} + +func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error { + ind, err := f.GetInt() + if err != nil { + return err + } + switch ind % 13 { + case 0: + hdr.Typeflag = tar.TypeReg + case 1: + hdr.Typeflag = tar.TypeLink + linkname, err := f.GetString() + if err != nil { + return err + } + hdr.Linkname = linkname + case 2: + hdr.Typeflag = tar.TypeSymlink + linkname, err := f.GetString() + if err != nil { + return err + } + hdr.Linkname = linkname + case 3: + hdr.Typeflag = tar.TypeChar + case 4: + hdr.Typeflag = tar.TypeBlock + case 5: + hdr.Typeflag = tar.TypeDir + case 6: + hdr.Typeflag = tar.TypeFifo + case 7: + hdr.Typeflag = tar.TypeCont + case 8: + hdr.Typeflag = tar.TypeXHeader + case 9: + hdr.Typeflag = tar.TypeXGlobalHeader + case 10: + hdr.Typeflag = tar.TypeGNUSparse + case 11: + hdr.Typeflag = tar.TypeGNULongName + case 12: + hdr.Typeflag = tar.TypeGNULongLink + } + return nil +} + +func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) { + return f.GetBytes() + /*length, err := f.GetUint32() + if err != nil { + return nil, errors.New("not enough bytes to create byte array") + } + + // A bit of optimization to attempt to create a file body + // when we don't have as many bytes left as "length" + remainingBytes := f.dataTotal - f.position + if remainingBytes <= 0 { + return nil, 
errors.New("created too large a string") + } + if f.position+length > MaxTotalLen { + return nil, errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return nil, errors.New("not enough bytes to create byte array") + } + if length == 0 { + return nil, errors.New("zero-length is not supported") + } + if byteBegin+length >= f.dataTotal { + return nil, errors.New("not enough bytes to create byte array") + } + if byteBegin+length < byteBegin { + return nil, errors.New("numbers overflow") + } + f.position = byteBegin + length + return f.data[byteBegin:f.position], nil*/ +} + +// getTarFileName is similar to GetString(), but creates string based +// on the length of f.data to reduce the likelihood of overflowing +// f.data. +func (f *ConsumeFuzzer) getTarFilename() (string, error) { + return f.GetString() + /*length, err := f.GetUint32() + if err != nil { + return "nil", errors.New("not enough bytes to create string") + } + + // A bit of optimization to attempt to create a file name + // when we don't have as many bytes left as "length" + remainingBytes := f.dataTotal - f.position + if remainingBytes <= 0 { + return "nil", errors.New("created too large a string") + } + if f.position > MaxTotalLen { + return "nil", errors.New("created too large a string") + } + byteBegin := f.position + if byteBegin >= f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin+length > f.dataTotal { + return "nil", errors.New("not enough bytes to create string") + } + if byteBegin > byteBegin+length { + return "nil", errors.New("numbers overflow") + } + f.position = byteBegin + length + return string(f.data[byteBegin:f.position]), nil*/ +} + +type TarFile struct { + Hdr *tar.Header + Body []byte +} + +// TarBytes returns valid bytes for a tar archive +func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { + numberOfFiles, err := f.GetInt() + if err != nil { + return nil, err + } + var tarFiles []*TarFile + tarFiles = make([]*TarFile, 0) + + const maxNoOfFiles = 100 + for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { + var filename string + var filebody []byte + var sec, nsec int + var err error + + filename, err = f.getTarFilename() + if err != nil { + var sb strings.Builder + sb.WriteString("file-") + sb.WriteString(strconv.Itoa(i)) + filename = sb.String() + } + filebody, err = f.createTarFileBody() + if err != nil { + var sb strings.Builder + sb.WriteString("filebody-") + sb.WriteString(strconv.Itoa(i)) + filebody = []byte(sb.String()) + } + + sec, err = f.GetInt() + if err != nil { + sec = 1672531200 // beginning of 2023 + } + nsec, err = f.GetInt() + if err != nil { + nsec = 1703980800 // end of 2023 + } + + hdr := &tar.Header{ + Name: filename, + Size: int64(len(filebody)), + Mode: 0o600, + ModTime: time.Unix(int64(sec), int64(nsec)), + } + if err := setTarHeaderTypeflag(hdr, f); err != nil { + return []byte(""), err + } + if err := setTarHeaderFormat(hdr, f); err != nil { + return []byte(""), err + } + tf := &TarFile{ + Hdr: hdr, + Body: filebody, + } + tarFiles = append(tarFiles, tf) + } + + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + defer tw.Close() + + for _, tf := range tarFiles { + tw.WriteHeader(tf.Hdr) + tw.Write(tf.Body) + } + return buf.Bytes(), nil +} + +// This is similar to TarBytes, but it returns a series of +// files instead of raw tar bytes. 
The advantage of this
+// api is that it is cheaper in terms of cpu power to
+// modify or check the files in the fuzzer with TarFiles()
+// because it avoids creating a tar reader.
+func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) {
+	numberOfFiles, err := f.GetInt()
+	if err != nil {
+		return nil, err
+	}
+	var tarFiles []*TarFile
+	tarFiles = make([]*TarFile, 0)
+
+	const maxNoOfFiles = 100
+	for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
+		filename, err := f.getTarFilename()
+		if err != nil {
+			return tarFiles, err
+		}
+		filebody, err := f.createTarFileBody()
+		if err != nil {
+			return tarFiles, err
+		}
+
+		sec, err := f.GetInt()
+		if err != nil {
+			return tarFiles, err
+		}
+		nsec, err := f.GetInt()
+		if err != nil {
+			return tarFiles, err
+		}
+
+		hdr := &tar.Header{
+			Name:    filename,
+			Size:    int64(len(filebody)),
+			Mode:    0o600,
+			ModTime: time.Unix(int64(sec), int64(nsec)),
+		}
+		if err := setTarHeaderTypeflag(hdr, f); err != nil {
+			hdr.Typeflag = tar.TypeReg
+		}
+		if err := setTarHeaderFormat(hdr, f); err != nil {
+			return tarFiles, err // should not happen
+		}
+		tf := &TarFile{
+			Hdr:  hdr,
+			Body: filebody,
+		}
+		tarFiles = append(tarFiles, tf)
+	}
+	return tarFiles, nil
+}
+
+// CreateFiles creates pseudo-random files in rootDir.
+// It creates subdirs and places the files there.
+// It is the caller's responsibility to ensure that
+// rootDir exists.
+func (f *ConsumeFuzzer) CreateFiles(rootDir string) error {
+	numberOfFiles, err := f.GetInt()
+	if err != nil {
+		return err
+	}
+	maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary
+	if maxNumberOfFiles == 0 {
+		return errors.New("maxNumberOfFiles is nil")
+	}
+
+	var noOfCreatedFiles int
+	for i := 0; i < maxNumberOfFiles; i++ {
+		// The file to create:
+		fileName, err := f.GetString()
+		if err != nil {
+			if noOfCreatedFiles > 0 {
+				// If files have been created, we don't return an error.
+				break
+			} else {
+				return errors.New("could not get fileName")
+			}
+		}
+		if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") {
+			continue
+		}
+		fullFilePath := filepath.Join(rootDir, fileName)
+
+		// Find the subdirectory of the file
+		if subDir := filepath.Dir(fileName); subDir != "" && subDir != "."
{ + // create the dir first; avoid going outside the root dir + if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") { + continue + } + dirPath := filepath.Join(rootDir, subDir) + if _, err := os.Stat(dirPath); os.IsNotExist(err) { + err2 := os.MkdirAll(dirPath, 0o777) + if err2 != nil { + continue + } + } + fullFilePath = filepath.Join(dirPath, fileName) + } else { + // Create symlink + createSymlink, err := f.GetBool() + if err != nil { + if noOfCreatedFiles > 0 { + break + } else { + return errors.New("could not create the symlink") + } + } + if createSymlink { + symlinkTarget, err := f.GetString() + if err != nil { + return err + } + err = os.Symlink(symlinkTarget, fullFilePath) + if err != nil { + return err + } + // stop loop here, since a symlink needs no further action + noOfCreatedFiles++ + continue + } + // We create a normal file + fileContents, err := f.GetBytes() + if err != nil { + if noOfCreatedFiles > 0 { + break + } else { + return errors.New("could not create the file") + } + } + err = os.WriteFile(fullFilePath, fileContents, 0o666) + if err != nil { + continue + } + noOfCreatedFiles++ + } + } + return nil +} + +// GetStringFrom returns a string that can only consist of characters +// included in possibleChars. It returns an error if the created string +// does not have the specified length. +func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) { + if (f.dataTotal - f.position) < uint32(length) { + return "", errors.New("not enough bytes to create a string") + } + output := make([]byte, 0, length) + for i := 0; i < length; i++ { + charIndex, err := f.GetInt() + if err != nil { + return string(output), err + } + output = append(output, possibleChars[charIndex%len(possibleChars)]) + } + return string(output), nil +} + +func (f *ConsumeFuzzer) GetRune() ([]rune, error) { + stringToConvert, err := f.GetString() + if err != nil { + return []rune("nil"), err + } + return []rune(stringToConvert), nil +} + +func (f *ConsumeFuzzer) GetFloat32() (float32, error) { + u32, err := f.GetNBytes(4) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + u32LE := binary.LittleEndian.Uint32(u32) + return math.Float32frombits(u32LE), nil + } + u32BE := binary.BigEndian.Uint32(u32) + return math.Float32frombits(u32BE), nil +} + +func (f *ConsumeFuzzer) GetFloat64() (float64, error) { + u64, err := f.GetNBytes(8) + if err != nil { + return 0, err + } + littleEndian, err := f.GetBool() + if err != nil { + return 0, err + } + if littleEndian { + u64LE := binary.LittleEndian.Uint64(u64) + return math.Float64frombits(u64LE), nil + } + u64BE := binary.BigEndian.Uint64(u64) + return math.Float64frombits(u64BE), nil +} + +func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error { + return f.GenerateStruct(targetSlice) +} diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go new file mode 100644 index 00000000..8ca3a61b --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go @@ -0,0 +1,62 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofuzzheaders + +import ( + "fmt" + "reflect" +) + +type Continue struct { + F *ConsumeFuzzer +} + +func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 1 { + fmt.Println(t.NumIn(), t.NumOut()) + + panic("Need 2 in and 1 out params. In must be the type. Out must be an error") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type Continue") + } + f.Funcs[argT] = v + } +} + +func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error { + e := reflect.ValueOf(targetStruct).Elem() + return f.fuzzStruct(e, true) +} + +func (c Continue) GenerateStruct(targetStruct interface{}) error { + return c.F.GenerateStruct(targetStruct) +} + +func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error { + return c.F.GenerateWithCustom(targetStruct) +} diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go new file mode 100644 index 00000000..2afd49f8 --- /dev/null +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go @@ -0,0 +1,556 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gofuzzheaders + +import ( + "fmt" + "strings" +) + +// returns a keyword by index +func getKeyword(f *ConsumeFuzzer) (string, error) { + index, err := f.GetInt() + if err != nil { + return keywords[0], err + } + for i, k := range keywords { + if i == index { + return k, nil + } + } + return keywords[0], fmt.Errorf("could not get a kw") +} + +// Simple utility function to check if a string +// slice contains a string. 
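Stepping back to the consumer API vendored above: a minimal sketch of how TarFiles() is typically driven from a Go fuzz target. NewConsumer is the package's constructor; the target name and the archive handling are illustrative assumptions, not part of this change.

package fuzztar

import (
	"archive/tar"
	"bytes"

	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// FuzzTar turns raw fuzzer input into pseudo-random tar entries and streams
// them through a tar.Writer, without ever creating a tar reader.
func FuzzTar(data []byte) int {
	f := fuzz.NewConsumer(data)
	files, err := f.TarFiles()
	if err != nil || len(files) == 0 {
		return 0
	}
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	for _, tf := range files {
		if err := tw.WriteHeader(tf.Hdr); err != nil {
			return 0
		}
		if _, err := tw.Write(tf.Body); err != nil {
			return 0
		}
	}
	tw.Close()
	// buf now holds a pseudo-random archive to hand to the code under test.
	return 1
}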
+func containsString(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// These keywords are used specifically for fuzzing Vitess +var keywords = []string{ + "accessible", "action", "add", "after", "against", "algorithm", + "all", "alter", "always", "analyze", "and", "as", "asc", "asensitive", + "auto_increment", "avg_row_length", "before", "begin", "between", + "bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit", + "blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade", + "cascaded", "case", "cast", "channel", "change", "char", "character", + "charset", "check", "checksum", "coalesce", "code", "collate", "collation", + "column", "columns", "comment", "committed", "commit", "compact", "complete", + "compressed", "compression", "condition", "connection", "constraint", "continue", + "convert", "copy", "cume_dist", "substr", "substring", "create", "cross", + "csv", "current_date", "current_time", "current_timestamp", "current_user", + "cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond", + "day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare", + "default", "definer", "delay_key_write", "delayed", "delete", "dense_rank", + "desc", "describe", "deterministic", "directory", "disable", "discard", + "disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile", + "duplicate", "dynamic", "each", "else", "elseif", "empty", "enable", + "enclosed", "encryption", "end", "enforced", "engine", "engines", "enum", + "error", "escape", "escaped", "event", "exchange", "exclusive", "exists", + "exit", "explain", "expansion", "export", "extended", "extract", "false", + "fetch", "fields", "first", "first_value", "fixed", "float", "float4", + "float8", "flush", "for", "force", "foreign", "format", "from", "full", + "fulltext", "function", "general", "generated", "geometry", "geometrycollection", + "get", "global", "gtid_executed", "grant", "group", "grouping", "groups", + "group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond", + "hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes", + "infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method", + "int", "int1", "int2", "int3", "int4", "int8", "integer", "interval", + "into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join", + "json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag", + "language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading", + "leave", "left", "less", "level", "like", "limit", "linear", "lines", + "linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs", + "long", "longblob", "longtext", "loop", "low_priority", "manifest", + "master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint", + "mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute", + "minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies", + "multilinestring", "multipoint", "multipolygon", "month", "name", + "names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog", + "nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on", + "only", "open", "optimize", "optimizer_costs", "option", "optionally", + "or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys", + "parser", "partition", "partitioning", "password", "percent_rank", "plugins", + "point", "polygon", 
"precision", "primary", "privileges", "processlist", + "procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write", + "real", "rebuild", "recursive", "redundant", "references", "regexp", "relay", + "release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable", + "replace", "require", "resignal", "restrict", "return", "retry", "revert", + "revoke", "right", "rlike", "rollback", "row", "row_format", "row_number", + "rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond", + "security", "select", "sensitive", "separator", "sequence", "serializable", + "session", "set", "share", "shared", "show", "signal", "signed", "slow", + "smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate", + "sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows", + "sql_no_cache", "sql_small_result", "ssl", "start", "starting", + "stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status", + "storage", "stored", "straight_join", "stream", "system", "vstream", + "table", "tables", "tablespace", "temporary", "temptable", "terminated", + "text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff", + "tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree", + "traditional", "trigger", "triggers", "true", "truncate", "uncommitted", + "undefined", "undo", "union", "unique", "unlock", "unsigned", "update", + "upgrade", "usage", "use", "user", "user_resources", "using", "utc_date", + "utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary", + "varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex", + "vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata", + "vitess_migration", "vitess_migrations", "vitess_replication_status", + "vitess_shards", "vitess_tablets", "vschema", "warnings", "when", + "where", "while", "window", "with", "without", "work", "write", "xor", + "year", "year_month", "zerofill", +} + +// Keywords that could get an additional keyword +var needCustomString = []string{ + "DISTINCTROW", "FROM", // Select keywords: + "GROUP BY", "HAVING", "WINDOW", + "FOR", + "ORDER BY", "LIMIT", + "INTO", "PARTITION", "AS", // Insert Keywords: + "ON DUPLICATE KEY UPDATE", + "WHERE", "LIMIT", // Delete keywords + "INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords + "TERMINATED BY", "ENCLOSED BY", + "ESCAPED BY", "STARTING BY", + "TERMINATED BY", "STARTING BY", + "IGNORE", + "VALUE", "VALUES", // Replace tokens + "SET", // Update tokens + "ENGINE =", // Drop tokens + "DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens + "COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS", +} + +var alterTableTokens = [][]string{ + {"CUSTOM_FUZZ_STRING"}, + {"CUSTOM_ALTTER_TABLE_OPTIONS"}, + {"PARTITION_OPTIONS_FOR_ALTER_TABLE"}, +} + +var alterTokens = [][]string{ + { + "DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE", + "LOGFILE GROUP", "PROCEDURE", "SERVER", + }, + {"CUSTOM_FUZZ_STRING"}, + { + "ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE", + "ADD UNDOFILE", "OPTIONS", + }, + {"RENAME TO", "INITIAL_SIZE = "}, + {"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"}, + {"COMMENT"}, + {"DO"}, +} + +var setTokens = [][]string{ + {"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"}, + {"CUSTOM_FUZZ_STRING", "DEFAULT", "="}, + {"CUSTOM_FUZZ_STRING"}, +} + +var dropTokens = [][]string{ + {"TEMPORARY", "UNDO"}, + { + "DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP", + "PROCEDURE", "FUNCTION", "SERVER", 
"SPATIAL REFERENCE SYSTEM", + "TABLE", "TABLESPACE", "TRIGGER", "VIEW", + }, + {"IF EXISTS"}, + {"CUSTOM_FUZZ_STRING"}, + {"ON", "ENGINE = ", "RESTRICT", "CASCADE"}, +} + +var renameTokens = [][]string{ + {"TABLE"}, + {"CUSTOM_FUZZ_STRING"}, + {"TO"}, + {"CUSTOM_FUZZ_STRING"}, +} + +var truncateTokens = [][]string{ + {"TABLE"}, + {"CUSTOM_FUZZ_STRING"}, +} + +var createTokens = [][]string{ + {"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system + { + "UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE", + "ALGORITHM = TEMPTABLE", + }, + { + "DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP", + "PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE", + "TRIGGER", "VIEW", + }, + {"IF NOT EXISTS"}, + {"CUSTOM_FUZZ_STRING"}, +} + +/* +// For future use. +var updateTokens = [][]string{ + {"LOW_PRIORITY"}, + {"IGNORE"}, + {"SET"}, + {"WHERE"}, + {"ORDER BY"}, + {"LIMIT"}, +} +*/ + +var replaceTokens = [][]string{ + {"LOW_PRIORITY", "DELAYED"}, + {"INTO"}, + {"PARTITION"}, + {"CUSTOM_FUZZ_STRING"}, + {"VALUES", "VALUE"}, +} + +var loadTokens = [][]string{ + {"DATA"}, + {"LOW_PRIORITY", "CONCURRENT", "LOCAL"}, + {"INFILE"}, + {"REPLACE", "IGNORE"}, + {"INTO TABLE"}, + {"PARTITION"}, + {"CHARACTER SET"}, + {"FIELDS", "COLUMNS"}, + {"TERMINATED BY"}, + {"OPTIONALLY"}, + {"ENCLOSED BY"}, + {"ESCAPED BY"}, + {"LINES"}, + {"STARTING BY"}, + {"TERMINATED BY"}, + {"IGNORE"}, + {"LINES", "ROWS"}, + {"CUSTOM_FUZZ_STRING"}, +} + +// These Are everything that comes after "INSERT" +var insertTokens = [][]string{ + {"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"}, + {"INTO"}, + {"PARTITION"}, + {"CUSTOM_FUZZ_STRING"}, + {"AS"}, + {"ON DUPLICATE KEY UPDATE"}, +} + +// These are everything that comes after "SELECT" +var selectTokens = [][]string{ + {"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"}, + {"HIGH_PRIORITY"}, + {"STRAIGHT_JOIN"}, + {"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"}, + {"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"}, + {"CUSTOM_FUZZ_STRING"}, + {"FROM"}, + {"WHERE"}, + {"GROUP BY"}, + {"HAVING"}, + {"WINDOW"}, + {"ORDER BY"}, + {"LIMIT"}, + {"CUSTOM_FUZZ_STRING"}, + {"FOR"}, +} + +// These are everything that comes after "DELETE" +var deleteTokens = [][]string{ + {"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"}, + {"PARTITION"}, + {"WHERE"}, + {"ORDER BY"}, + {"LIMIT"}, +} + +var alter_table_options = []string{ + "ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL", + "CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER", + "NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE", + "CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK", + "FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN", + "AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION", + "DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION", + "COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION", + "ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION", + "REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT", + "KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH", + "CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT", + "COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT", + "STATS_SAMPLE_PAGES", "ZLIB", "LZ4", 
"ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS", + "MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY", + "DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION", +} + +// Creates an 'alter table' statement. 'alter table' is an exception +// in that it has its own function. The majority of statements +// are created by 'createStmt()'. +func createAlterTableStmt(f *ConsumeFuzzer) (string, error) { + maxArgs, err := f.GetInt() + if err != nil { + return "", err + } + maxArgs = maxArgs % 30 + if maxArgs == 0 { + return "", fmt.Errorf("could not create alter table stmt") + } + + var stmt strings.Builder + stmt.WriteString("ALTER TABLE ") + for i := 0; i < maxArgs; i++ { + // Calculate if we get existing token or custom string + tokenType, err := f.GetInt() + if err != nil { + return "", err + } + if tokenType%4 == 1 { + customString, err := f.GetString() + if err != nil { + return "", err + } + stmt.WriteString(" " + customString) + } else { + tokenIndex, err := f.GetInt() + if err != nil { + return "", err + } + stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)]) + } + } + return stmt.String(), nil +} + +func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) { + index, err := f.GetInt() + if err != nil { + return "", err + } + var token strings.Builder + token.WriteString(tokens[index%len(tokens)]) + if token.String() == "CUSTOM_FUZZ_STRING" { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + return customFuzzString, nil + } + + // Check if token requires an argument + if containsString(needCustomString, token.String()) { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + token.WriteString(" " + customFuzzString) + } + return token.String(), nil +} + +var stmtTypes = map[string][][]string{ + "DELETE": deleteTokens, + "INSERT": insertTokens, + "SELECT": selectTokens, + "LOAD": loadTokens, + "REPLACE": replaceTokens, + "CREATE": createTokens, + "DROP": dropTokens, + "RENAME": renameTokens, + "TRUNCATE": truncateTokens, + "SET": setTokens, + "ALTER": alterTokens, + "ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens +} + +var stmtTypeEnum = map[int]string{ + 0: "DELETE", + 1: "INSERT", + 2: "SELECT", + 3: "LOAD", + 4: "REPLACE", + 5: "CREATE", + 6: "DROP", + 7: "RENAME", + 8: "TRUNCATE", + 9: "SET", + 10: "ALTER", + 11: "ALTER TABLE", +} + +func createStmt(f *ConsumeFuzzer) (string, error) { + stmtIndex, err := f.GetInt() + if err != nil { + return "", err + } + stmtIndex = stmtIndex % len(stmtTypes) + + queryType := stmtTypeEnum[stmtIndex] + tokens := stmtTypes[queryType] + + // We have custom creator for ALTER TABLE + if queryType == "ALTER TABLE" { + query, err := createAlterTableStmt(f) + if err != nil { + return "", err + } + return query, nil + } + + // Here we are creating a query that is not + // an 'alter table' query. For available + // queries, see "stmtTypes" + + // First specify the first query keyword: + var query strings.Builder + query.WriteString(queryType) + + // Next create the args for the + queryArgs, err := createStmtArgs(tokens, f) + if err != nil { + return "", err + } + query.WriteString(" " + queryArgs) + return query.String(), nil +} + +// Creates the arguments of a statements. In a select statement +// that would be everything after "select". 
+func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) { + var query, token strings.Builder + + // We go through the tokens in the tokenslice, + // create the respective token and add it to + // "query" + for _, tokens := range tokenslice { + // For extra randomization, the fuzzer can + // choose to not include this token. + includeThisToken, err := f.GetBool() + if err != nil { + return "", err + } + if !includeThisToken { + continue + } + + // There may be several tokens to choose from: + if len(tokens) > 1 { + chosenToken, err := chooseToken(tokens, f) + if err != nil { + return "", err + } + query.WriteString(" " + chosenToken) + } else { + token.WriteString(tokens[0]) + + // In case the token is "CUSTOM_FUZZ_STRING" + // we will then create a non-structured string + if token.String() == "CUSTOM_FUZZ_STRING" { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + query.WriteString(" " + customFuzzString) + continue + } + + // Check if token requires an argument. + // Tokens that take an argument can be found + // in 'needCustomString'. If so, we add a + // non-structured string to the token. + if containsString(needCustomString, token.String()) { + customFuzzString, err := f.GetString() + if err != nil { + return "", err + } + token.WriteString(fmt.Sprintf(" %s", customFuzzString)) + } + query.WriteString(fmt.Sprintf(" %s", token.String())) + } + } + return query.String(), nil +} + +// Creates a semi-structured query. It creates a string +// that is a combination of the keywords and random strings. +func createQuery(f *ConsumeFuzzer) (string, error) { + queryLen, err := f.GetInt() + if err != nil { + return "", err + } + maxLen := queryLen % 60 + if maxLen == 0 { + return "", fmt.Errorf("could not create a query") + } + var query strings.Builder + for i := 0; i < maxLen; i++ { + // Get a new token: + useKeyword, err := f.GetBool() + if err != nil { + return "", err + } + if useKeyword { + keyword, err := getKeyword(f) + if err != nil { + return "", err + } + query.WriteString(" " + keyword) + } else { + customString, err := f.GetString() + if err != nil { + return "", err + } + query.WriteString(" " + customString) + } + } + if query.String() == "" { + return "", fmt.Errorf("could not create a query") + } + return query.String(), nil +} + +// GetSQLString is the API that users interact with. 
+// +// Usage: +// +// f := NewConsumer(data) +// sqlString, err := f.GetSQLString() +func (f *ConsumeFuzzer) GetSQLString() (string, error) { + var query string + veryStructured, err := f.GetBool() + if err != nil { + return "", err + } + if veryStructured { + query, err = createStmt(f) + if err != nil { + return "", err + } + } else { + query, err = createQuery(f) + if err != nil { + return "", err + } + } + return query, nil +} diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE new file mode 100644 index 00000000..ae80b672 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Stack Exchange + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md new file mode 100644 index 00000000..c4a432d6 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/README.md @@ -0,0 +1,13 @@ +wmi +=== + +Package wmi provides a WQL interface to Windows WMI. + +Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. + +--- + +NOTE: This project is no longer being actively maintained. If you would like +to become its new owner, please contact tlimoncelli at stack over flow dot com. + +--- diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go new file mode 100644 index 00000000..3ff87563 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/swbemservices.go @@ -0,0 +1,260 @@ +// +build windows + +package wmi + +import ( + "fmt" + "reflect" + "runtime" + "sync" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx +type SWbemServices struct { + //TODO: track namespace. 
Not sure if we can re connect to a different namespace using the same instance + cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method + sWbemLocatorIUnknown *ole.IUnknown + sWbemLocatorIDispatch *ole.IDispatch + queries chan *queryRequest + closeError chan error + lQueryorClose sync.Mutex +} + +type queryRequest struct { + query string + dst interface{} + args []interface{} + finished chan error +} + +// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI +func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { + //fmt.Println("InitializeSWbemServices: Starting") + //TODO: implement connectServerArgs as optional argument for init with connectServer call + s := new(SWbemServices) + s.cWMIClient = c + s.queries = make(chan *queryRequest) + initError := make(chan error) + go s.process(initError) + + err, ok := <-initError + if ok { + return nil, err //Send error to caller + } + //fmt.Println("InitializeSWbemServices: Finished") + return s, nil +} + +// Close will clear and release all of the SWbemServices resources +func (s *SWbemServices) Close() error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + //fmt.Println("Close: sending close request") + var result error + ce := make(chan error) + s.closeError = ce //Race condition if multiple callers to close. May need to lock here + close(s.queries) //Tell background to shut things down + s.lQueryorClose.Unlock() + err, ok := <-ce + if ok { + result = err + } + //fmt.Println("Close: finished") + return result +} + +func (s *SWbemServices) process(initError chan error) { + //fmt.Println("process: starting background thread initialization") + //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) + return + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) + return + } else if unknown == nil { + initError <- ErrNilCreateObject + return + } + defer unknown.Release() + s.sWbemLocatorIUnknown = unknown + + dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) + return + } + defer dispatch.Release() + s.sWbemLocatorIDispatch = dispatch + + // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs + //fmt.Println("process: initialized. 
closing initError") + close(initError) + //fmt.Println("process: waiting for queries") + for q := range s.queries { + //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) + errQuery := s.queryBackground(q) + //fmt.Println("process: s.queryBackground finished") + if errQuery != nil { + q.finished <- errQuery + } + close(q.finished) + } + //fmt.Println("process: queries channel closed") + s.queries = nil //set channel to nil so we know it is closed + //TODO: I think the Release/Clear calls can panic if things are in a bad state. + //TODO: May need to recover from panics and send error to method caller instead. + close(s.closeError) +} + +// Query runs the WQL query using a SWbemServices instance and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + + //fmt.Println("Query: Sending query request") + qr := queryRequest{ + query: query, + dst: dst, + args: connectServerArgs, + finished: make(chan error), + } + s.queries <- &qr + s.lQueryorClose.Unlock() + err, ok := <-qr.finished + if ok { + //fmt.Println("Query: Finished with error") + return err //Send error to caller + } + //fmt.Println("Query: Finished") + return nil +} + +func (s *SWbemServices) queryBackground(q *queryRequest) error { + if s == nil || s.sWbemLocatorIDispatch == nil { + return fmt.Errorf("SWbemServices is not Initialized") + } + wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart + //fmt.Println("queryBackground: Starting") + + dv := reflect.ValueOf(q.dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + //fmt.Println("queryBackground: Finished") + return errFieldMismatch +} diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go new file mode 100644 index 00000000..b4bb4f09 --- /dev/null +++ b/vendor/github.com/StackExchange/wmi/wmi.go @@ -0,0 +1,590 @@ +// +build windows + +/* +Package wmi provides a WQL interface for WMI on Windows. + +Example code to print names of running processes: + + type Win32_Process struct { + Name string + } + + func main() { + var dst []Win32_Process + q := wmi.CreateQuery(&dst, "") + err := wmi.Query(q, &dst) + if err != nil { + log.Fatal(err) + } + for i, v := range dst { + println(i, v.Name) + } + } + +*/ +package wmi + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +var l = log.New(os.Stdout, "", log.LstdFlags) + +var ( + ErrInvalidEntityType = errors.New("wmi: invalid entity type") + // ErrNilCreateObject is the error returned if CreateObject returns nil even + // if the error was nil. + ErrNilCreateObject = errors.New("wmi: create object returned nil") + lock sync.Mutex +) + +// S_FALSE is returned by CoInitializeEx if it was already called on this thread. +const S_FALSE = 0x00000001 + +// QueryNamespace invokes Query with the given namespace on the local machine. +func QueryNamespace(query string, dst interface{}, namespace string) error { + return Query(query, dst, nil, namespace) +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. 
+// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. +// +// Query is a wrapper around DefaultClient.Query. +func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + if DefaultClient.SWbemServicesClient == nil { + return DefaultClient.Query(query, dst, connectServerArgs...) + } + return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) +} + +// CallMethod calls a method named methodName on an instance of the class named +// className, with the given params. +// +// CallMethod is a wrapper around DefaultClient.CallMethod. +func CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) { + return DefaultClient.CallMethod(connectServerArgs, className, methodName, params) +} + +// A Client is an WMI query client. +// +// Its zero value (DefaultClient) is a usable client. +type Client struct { + // NonePtrZero specifies if nil values for fields which aren't pointers + // should be returned as the field types zero value. + // + // Setting this to true allows stucts without pointer fields to be used + // without the risk failure should a nil value returned from WMI. + NonePtrZero bool + + // PtrNil specifies if nil values for pointer fields should be returned + // as nil. + // + // Setting this to true will set pointer fields to nil where WMI + // returned nil, otherwise the types zero value will be returned. + PtrNil bool + + // AllowMissingFields specifies that struct fields not present in the + // query result should not result in an error. + // + // Setting this to true allows custom queries to be used with full + // struct definitions instead of having to define multiple structs. + AllowMissingFields bool + + // SWbemServiceClient is an optional SWbemServices object that can be + // initialized and then reused across multiple queries. If it is null + // then the method will initialize a new temporary client each time. + SWbemServicesClient *SWbemServices +} + +// DefaultClient is the default Client and is used by Query, QueryNamespace, and CallMethod. +var DefaultClient = &Client{} + +// coinitService coinitializes WMI service. If no error is returned, a cleanup function +// is returned which must be executed (usually deferred) to clean up allocated resources. 
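A caller-side sketch of the contract described above (hypothetical caller; the CallMethod and Query implementations below consume it the same way):

	service, cleanup, err := c.coinitService(connectServerArgs...)
	if err != nil {
		return err
	}
	// cleanup releases the COM objects and calls CoUninitialize;
	// it must run once the service is no longer needed, so defer it.
	defer cleanup()
	// ... use service for ExecQuery / Get calls ...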
+func (c *Client) coinitService(connectServerArgs ...interface{}) (*ole.IDispatch, func(), error) { + var unknown *ole.IUnknown + var wmi *ole.IDispatch + var serviceRaw *ole.VARIANT + + // be sure teardown happens in the reverse + // order from that which they were created + deferFn := func() { + if serviceRaw != nil { + serviceRaw.Clear() + } + if wmi != nil { + wmi.Release() + } + if unknown != nil { + unknown.Release() + } + ole.CoUninitialize() + } + + // if we error'ed here, clean up immediately + var err error + defer func() { + if err != nil { + deferFn() + } + }() + + err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + return nil, nil, err + } + } + + unknown, err = oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + return nil, nil, err + } else if unknown == nil { + return nil, nil, ErrNilCreateObject + } + + wmi, err = unknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + return nil, nil, err + } + + // service is a SWbemServices + serviceRaw, err = oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) + if err != nil { + return nil, nil, err + } + + return serviceRaw.ToIDispatch(), deferFn, nil +} + +// CallMethod calls a WMI method named methodName on an instance +// of the class named className. It passes in the arguments given +// in params. Use connectServerArgs to customize the machine and +// namespace; by default, the local machine and default namespace +// are used. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. +func (c *Client) CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) { + service, cleanup, err := c.coinitService(connectServerArgs...) + if err != nil { + return 0, fmt.Errorf("coinit: %v", err) + } + defer cleanup() + + // Get class + classRaw, err := oleutil.CallMethod(service, "Get", className) + if err != nil { + return 0, fmt.Errorf("CallMethod Get class %s: %v", className, err) + } + class := classRaw.ToIDispatch() + defer classRaw.Clear() + + // Run method + resultRaw, err := oleutil.CallMethod(class, methodName, params...) + if err != nil { + return 0, fmt.Errorf("CallMethod %s.%s: %v", className, methodName, err) + } + resultInt, ok := resultRaw.Value().(int32) + if !ok { + return 0, fmt.Errorf("return value was not an int32: %v (%T)", resultRaw, resultRaw) + } + + return resultInt, nil +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. 
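The package doc at the top of this file shows a one-shot query; a sketch of the reuse path mentioned in the docs, where a single SWbemServices connection backs repeated package-level queries (the struct and field are illustrative; Windows-only):

package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

type Win32_OperatingSystem struct {
	Caption string
}

func main() {
	s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()
	// Route package-level wmi.Query calls through the persistent connection.
	wmi.DefaultClient.SWbemServicesClient = s

	var dst []Win32_OperatingSystem
	q := wmi.CreateQuery(&dst, "")
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, os := range dst {
		log.Println(os.Caption)
	}
}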
+func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + dv := reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + lock.Lock() + defer lock.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + service, cleanup, err := c.coinitService(connectServerArgs...) + if err != nil { + return err + } + defer cleanup() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = c.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + return errFieldMismatch +} + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +var timeType = reflect.TypeOf(time.Time{}) + +// loadEntity loads a SWbemObject into a struct pointer. 
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { + v := reflect.ValueOf(dst).Elem() + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + of := f + isPtr := f.Kind() == reflect.Ptr + if isPtr { + ptr := reflect.New(f.Type().Elem()) + f.Set(ptr) + f = f.Elem() + } + n := v.Type().Field(i).Name + if n[0] < 'A' || n[0] > 'Z' { + continue + } + if !f.CanSet() { + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "CanSet() is false", + } + } + prop, err := oleutil.GetProperty(src, n) + if err != nil { + if !c.AllowMissingFields { + errFieldMismatch = &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "no such struct field", + } + } + continue + } + defer prop.Clear() + + if prop.VT == 0x1 { //VT_NULL + continue + } + + switch val := prop.Value().(type) { + case int8, int16, int32, int64, int: + v := reflect.ValueOf(val).Int() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(v) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(uint64(v)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case uint8, uint16, uint32, uint64: + v := reflect.ValueOf(val).Uint() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(int64(v)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(v) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case string: + switch f.Kind() { + case reflect.String: + f.SetString(val) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + iv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + f.SetInt(iv) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uv, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + f.SetUint(uv) + case reflect.Struct: + switch f.Type() { + case timeType: + if len(val) == 25 { + mins, err := strconv.Atoi(val[22:]) + if err != nil { + return err + } + val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) + } + t, err := time.Parse("20060102150405.000000-0700", val) + if err != nil { + return err + } + f.Set(reflect.ValueOf(t)) + } + } + case bool: + switch f.Kind() { + case reflect.Bool: + f.SetBool(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a bool", + } + } + case float32: + switch f.Kind() { + case reflect.Float32: + f.SetFloat(float64(val)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float32", + } + } + default: + if f.Kind() == reflect.Slice { + switch f.Type().Elem().Kind() { + case reflect.String: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetString(v.(string)) + } + f.Set(fArr) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetUint(reflect.ValueOf(v).Uint()) + } + f.Set(fArr) + } + case reflect.Int8, 
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetInt(reflect.ValueOf(v).Int()) + } + f.Set(fArr) + } + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported slice type (%T)", val), + } + } + } else { + typeof := reflect.TypeOf(val) + if typeof == nil && (isPtr || c.NonePtrZero) { + if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) { + of.Set(reflect.Zero(of.Type())) + } + break + } + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported type (%T)", val), + } + } + } + } + return errFieldMismatch +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypeStruct + multiArgTypeStructPtr +) + +// checkMultiArg checks that v has type []S, []*S for some struct type S. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +func oleInt64(item *ole.IDispatch, prop string) (int64, error) { + v, err := oleutil.GetProperty(item, prop) + if err != nil { + return 0, err + } + defer v.Clear() + + i := int64(v.Val) + return i, nil +} + +// CreateQuery returns a WQL query string that queries all columns of src. where +// is an optional string that is appended to the query, to be used with WHERE +// clauses. In such a case, the "WHERE" string should appear at the beginning. +// The wmi class is obtained by the name of the type. You can pass a optional +// class throught the variadic class parameter which is useful for anonymous +// structs. +func CreateQuery(src interface{}, where string, class ...string) string { + var b bytes.Buffer + b.WriteString("SELECT ") + s := reflect.Indirect(reflect.ValueOf(src)) + t := s.Type() + if s.Kind() == reflect.Slice { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return "" + } + var fields []string + for i := 0; i < t.NumField(); i++ { + fields = append(fields, t.Field(i).Name) + } + b.WriteString(strings.Join(fields, ", ")) + b.WriteString(" FROM ") + if len(class) > 0 { + b.WriteString(class[0]) + } else { + b.WriteString(t.Name()) + } + b.WriteString(" " + where) + return b.String() +} diff --git a/vendor/github.com/containerd/containerd/v2/LICENSE b/vendor/github.com/containerd/containerd/v2/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/v2/NOTICE b/vendor/github.com/containerd/containerd/v2/NOTICE new file mode 100644 index 00000000..8915f027 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. 
+ +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go b/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go new file mode 100644 index 00000000..4ba6cc74 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/content/adaptor.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "strings" + + "github.com/containerd/containerd/v2/pkg/filters" +) + +// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. +func AdaptInfo(info Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/v2/core/content/content.go b/vendor/github.com/containerd/containerd/v2/core/content/content.go new file mode 100644 index 00000000..66b42a9c --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/content/content.go @@ -0,0 +1,212 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "time" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +// +// Overall content lifecycle: +// - Ingester is used to initiate a write operation (aka ingestion) +// - IngestManager is used to manage (e.g. list, abort) active ingestions +// - Once an ingestion is complete (see Writer.Commit), Provider is used to +// query a single piece of content by its digest +// - Manager is used to manage (e.g. list, delete) previously committed content +// +// Note that until ingestion is complete, its content is not visible through +// Provider or Manager. Once ingestion is complete, it is no longer exposed +// through IngestManager. 
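Editorial aside, not part of the vendored file: the ingest lifecycle described above is easiest to see in a short consumer sketch against the Store interface defined just below. Only the content APIs vendored here (Writer, WithRef, Commit, ReaderAt) are real; the package name, the roundTrip helper, and the ref string are hypothetical.

// Hypothetical consumer package; only the content API calls are real.
package main

import (
	"context"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// roundTrip performs one full ingest-then-read cycle against an
// arbitrary content.Store implementation.
func roundTrip(ctx context.Context, cs content.Store, data []byte) error {
	dgst := digest.FromBytes(data)

	// Initiate an ingestion, uniquely identified by its ref (Ingester).
	w, err := cs.Writer(ctx, content.WithRef("example-ingest-ref"))
	if err != nil {
		return err
	}
	defer w.Close() // safe: Close on a closed writer does not error

	if _, err := w.Write(data); err != nil {
		return err
	}

	// Commit completes the ingestion; only now does the blob become
	// visible through Provider and Manager.
	if err := w.Commit(ctx, int64(len(data)), dgst); err != nil {
		return err
	}

	// Read the committed blob back by digest (Provider).
	ra, err := cs.ReaderAt(ctx, ocispec.Descriptor{Digest: dgst, Size: int64(len(data))})
	if err != nil {
		return err
	}
	return ra.Close()
}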
+type Store interface { + Manager + Provider + IngestManager + Ingester +} + +// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer +type ReaderAt interface { + io.ReaderAt + io.Closer + Size() int64 +} + +// Provider provides a reader interface for specific content +type Provider interface { + // ReaderAt only requires desc.Digest to be set. + // Other fields in the descriptor may be used internally for resolving + // the location of the actual data. + ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) +} + +// Ingester writes content +type Ingester interface { + // Writer initiates a writing operation (aka ingestion). A single ingestion + // is uniquely identified by its ref, provided using a WithRef option. + // Writer can be called multiple times with the same ref to access the same + // ingestion. + // Once all the data is written, use Writer.Commit to complete the ingestion. + Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) +} + +// IngestManager provides methods for managing ingestions. An ingestion is a +// not-yet-complete writing operation initiated using Ingester and identified +// by a ref string. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match + // the provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + +// Info holds content specific information +type Info struct { + Digest digest.Digest + Size int64 + CreatedAt time.Time + UpdatedAt time.Time + Labels map[string]string +} + +// Status of a content operation (i.e. an ingestion) +type Status struct { + Ref string + Offset int64 + Total int64 + Expected digest.Digest + StartedAt time.Time + UpdatedAt time.Time +} + +// WalkFunc defines the callback for a blob walk. +type WalkFunc func(Info) error + +// InfoReaderProvider provides both info and reader for the specific content. +type InfoReaderProvider interface { + InfoProvider + Provider +} + +// InfoProvider provides info for content inspection. +type InfoProvider interface { + // Info will return metadata about content available in the content store. + // + // If the content is not present, ErrNotFound will be returned. + Info(ctx context.Context, dgst digest.Digest) (Info, error) +} + +// Manager provides methods for inspecting, listing and removing content. +type Manager interface { + InfoProvider + + // Update updates mutable information related to content. + // If one or more fieldpaths are provided, only those + // fields will be updated. + // Mutable fields: + // labels.* + Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) + + // Walk will call fn for each item in the content store which + // match the provided filters. If no filters are given all + // items will be walked. + Walk(ctx context.Context, fn WalkFunc, filters ...string) error + + // Delete removes the content from the store. + Delete(ctx context.Context, dgst digest.Digest) error +} + +// Writer handles writing of content into a content store +type Writer interface { + // Close closes the writer, if the writer has not been + // committed this allows resuming or aborting. 
+ // Calling Close on a closed writer will not error. + io.WriteCloser + + // Digest may return empty digest or panics until committed. + Digest() digest.Digest + + // Commit commits the blob (but no roll-back is guaranteed on an error). + // size and expected can be zero-value when unknown. + // Commit always closes the writer, even on error. + // ErrAlreadyExists aborts the writer. + Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error + + // Status returns the current state of write + Status() (Status, error) + + // Truncate updates the size of the target blob + Truncate(size int64) error +} + +type Syncer interface { + // Sync flushes the in-flight writes to the disk (when applicable) + Sync() error +} + +// Opt is used to alter the mutable properties of content +type Opt func(*Info) error + +// WithLabels allows labels to be set on content +func WithLabels(labels map[string]string) Opt { + return func(info *Info) error { + info.Labels = labels + return nil + } +} + +// WriterOpts is internally used by WriterOpt. +type WriterOpts struct { + Ref string + Desc ocispec.Descriptor +} + +// WriterOpt is used for passing options to Ingester.Writer. +type WriterOpt func(*WriterOpts) error + +// WithDescriptor specifies an OCI descriptor. +// Writer may optionally use the descriptor internally for resolving +// the location of the actual data. +// Write does not require any field of desc to be set. +// If the data size is unknown, desc.Size should be set to 0. +// Some implementations may also accept negative values as "unknown". +func WithDescriptor(desc ocispec.Descriptor) WriterOpt { + return func(opts *WriterOpts) error { + opts.Desc = desc + return nil + } +} + +// WithRef specifies a ref string. +func WithRef(ref string) WriterOpt { + return func(opts *WriterOpts) error { + opts.Ref = ref + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/v2/core/content/helpers.go b/vendor/github.com/containerd/containerd/v2/core/content/helpers.go new file mode 100644 index 00000000..74cb566b --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/content/helpers.go @@ -0,0 +1,353 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package content + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "sync" + "time" + + "github.com/containerd/containerd/v2/internal/randutil" + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ErrReset = errors.New("writer has been reset") + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +type reader interface { + Reader() io.Reader +} + +// NewReader returns a io.Reader from a ReaderAt +func NewReader(ra ReaderAt) io.Reader { + if rd, ok := ra.(reader); ok { + return rd.Reader() + } + return io.NewSectionReader(ra, 0, ra.Size()) +} + +type nopCloserBytesReader struct { + *bytes.Reader +} + +func (*nopCloserBytesReader) Close() error { return nil } + +type nopCloserSectionReader struct { + *io.SectionReader +} + +func (*nopCloserSectionReader) Close() error { return nil } + +// BlobReadSeeker returns a read seeker for the blob from the provider. +func BlobReadSeeker(ctx context.Context, provider Provider, desc ocispec.Descriptor) (io.ReadSeekCloser, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return &nopCloserBytesReader{bytes.NewReader(desc.Data)}, nil + } + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + return &nopCloserSectionReader{io.NewSectionReader(ra, 0, ra.Size())}, nil +} + +// ReadBlob retrieves the entire contents of the blob from the provider. +// +// Avoid using this for large blobs, such as layers. +func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return desc.Data, nil + } + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + defer ra.Close() + + p := make([]byte, ra.Size()) + + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return p, err +} + +// WriteBlob writes data with the expected digest into the content store. If +// expected already exists, the method returns immediately and the reader will +// not be consumed. +// +// This is useful when the digest and size are known beforehand. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { + cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed to open writer: %w", err) + } + + return nil // already present + } + defer cw.Close() + + return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) +} + +// OpenWriter opens a new writer for the given reference, retrying if the writer +// is locked until the reference is available or returns an error. +func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { + var ( + cw Writer + err error + retry = 16 + ) + for { + cw, err = cs.Writer(ctx, opts...) + if err != nil { + if !errdefs.IsUnavailable(err) { + return nil, err + } + + // TODO: Check status to determine if the writer is active, + // continue waiting while active, otherwise return lock + // error or abort. 
Requires asserting for an ingest manager + + select { + case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): + if retry < 2048 { + retry = retry << 1 + } + continue + case <-ctx.Done(): + // Propagate lock error + return nil, err + } + + } + break + } + + return cw, err +} + +// Copy copies data with the expected digest from the reader into the +// provided content store writer. This copy commits the writer. +// +// This is useful when the digest and size are known beforehand. When +// the size or digest is unknown, these values may be empty. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { + r := or + for i := 0; ; i++ { + if i >= 1 { + log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") + } + + ws, err := cw.Status() + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + // Reset the original reader if + // 1. there is an offset, or + // 2. this is a retry due to Reset error + if ws.Offset > 0 || i > 0 { + r, err = seekReader(or, ws.Offset, size) + if err != nil { + return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + copied, err := copyWithBuffer(cw, r) + if errors.Is(err, ErrReset) { + continue + } + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if errors.Is(err, ErrReset) { + continue + } + if !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + } + return nil + } +} + +// CopyReaderAt copies to a writer from a given reader at for the given +// number of bytes. This copy does not commit the writer. +func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { + ws, err := cw.Status() + if err != nil { + return err + } + + copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) + if err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + if copied < n { + // Short writes would return its own error, this indicates a read failure + return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + } + return nil +} + +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, fmt.Errorf("failed to get status: %w", err) + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) + } + } + + return copyWithBuffer(cw, r) +} + +// seekReader attempts to seek the reader to the given offset, either by +// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding +// up to the given offset. +func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { + // attempt to resolve r as a seeker and setup the offset. 
+ seeker, ok := r.(io.Seeker) + if ok { + nn, err := seeker.Seek(offset, io.SeekStart) + if nn != offset { + if err == nil { + err = fmt.Errorf("unexpected seek location without seek error") + } + return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) + } + + if err != nil { + return nil, err + } + + return r, nil + } + + // ok, let's try io.ReaderAt! + readerAt, ok := r.(io.ReaderAt) + if ok && size > offset { + sr := io.NewSectionReader(readerAt, offset, size) + return sr, nil + } + + // well then, let's just discard up to the offset + n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) + if err != nil { + return nil, fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return nil, errors.New("unable to discard to offset") + } + + return r, nil +} + +// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer +// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have +// a full buffer before we do a write operation to dst to reduce overheads associated +// with the write operations of small buffers. +func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { + // If the reader has a WriteTo method, use it to do the copy. + // Avoids an allocation and a copy. + if wt, ok := src.(io.WriterTo); ok { + return wt.WriteTo(dst) + } + // Similarly, if the writer has a ReadFrom method, use it to do the copy. + if rt, ok := dst.(io.ReaderFrom); ok { + return rt.ReadFrom(src) + } + bufRef := bufPool.Get().(*[]byte) + defer bufPool.Put(bufRef) + buf := *bufRef + for { + nr, er := io.ReadAtLeast(src, buf, len(buf)) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + // If an EOF happens after reading fewer than the requested bytes, + // ReadAtLeast returns ErrUnexpectedEOF. + if er != io.EOF && er != io.ErrUnexpectedEOF { + err = er + } + break + } + } + return +} + +// Exists returns whether an attempt to access the content would not error out +// with an ErrNotFound error. It will return an encountered error if it was +// different than ErrNotFound. +func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) { + _, err := provider.Info(ctx, desc.Digest) + if errdefs.IsNotFound(err) { + return false, nil + } + return err == nil, err +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/annotations.go b/vendor/github.com/containerd/containerd/v2/core/images/annotations.go new file mode 100644 index 00000000..47d92104 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/annotations.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package images
+
+const (
+	// AnnotationImageName is an annotation on a Descriptor in an index.json
+	// containing the `Name` value as used by an `Image` struct
+	AnnotationImageName = "io.containerd.image.name"
+)
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go
new file mode 100644
index 00000000..3d98daea
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/exporter.go
@@ -0,0 +1,589 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/containerd/v2/pkg/labels"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
+	"github.com/containerd/platforms"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type exportOptions struct {
+	manifests          []ocispec.Descriptor
+	platform           platforms.MatchComparer
+	allPlatforms       bool
+	skipDockerManifest bool
+	blobRecordOptions  blobRecordOptions
+}
+
+// ExportOpt defines options for configuring exported descriptors
+type ExportOpt func(context.Context, *exportOptions) error
+
+// WithPlatform defines the platform used to filter manifest lists when
+// not exporting all platforms.
+// Additionally, platform is used to resolve image configs for
+// Docker v1.1, v1.2 format compatibility.
+func WithPlatform(p platforms.MatchComparer) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.platform = p
+		return nil
+	}
+}
+
+// WithAllPlatforms exports all manifests from a manifest list.
+// Missing content will fail the export.
+func WithAllPlatforms() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.allPlatforms = true
+		return nil
+	}
+}
+
+// WithSkipDockerManifest skips creation of the Docker compatible
+// manifest.json file.
+func WithSkipDockerManifest() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.skipDockerManifest = true
+		return nil
+	}
+}
+
+// WithImage adds the provided image to the exported archive.
+func WithImage(is images.Store, name string) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		img, err := is.Get(ctx, name)
+		if err != nil {
+			return err
+		}
+
+		img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
+		o.manifests = append(o.manifests, img.Target)
+
+		return nil
+	}
+}
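Editorial aside, not part of the vendored file: before the remaining option constructors, a minimal sketch of how these ExportOpts are typically combined with the Export function defined later in this file. The exportExample helper, the output path, and the image name are hypothetical; Export, WithImage, WithPlatform, and WithSkipMissing are the vendored APIs.

package main

import (
	"context"
	"os"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/containerd/v2/core/images/archive"
	"github.com/containerd/platforms"
)

// exportExample writes a single named image, filtered to the host
// platform, as an OCI layout tar on disk.
func exportExample(ctx context.Context, cs content.InfoReaderProvider, is images.Store) error {
	f, err := os.Create("image.tar")
	if err != nil {
		return err
	}
	defer f.Close()

	return archive.Export(ctx, cs, f,
		archive.WithImage(is, "docker.io/library/alpine:latest"),
		archive.WithPlatform(platforms.DefaultStrict()),
		// Keep the multi-platform index even when some referenced
		// platforms are missing locally.
		archive.WithSkipMissing(cs),
	)
}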
+
+// WithImages adds multiple images to the exported archive.
+func WithImages(imgs []images.Image) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		for _, img := range imgs {
+			img.Target.Annotations = addNameAnnotation(img.Name, img.Target.Annotations)
+			o.manifests = append(o.manifests, img.Target)
+		}
+
+		return nil
+	}
+}
+
+// WithManifest adds a manifest to the exported archive.
+// When names are given they will be set on the manifest in the
+// exported archive, creating an index record for each name.
+// When no names are provided, it is up to the caller to put a name annotation
+// on the manifest descriptor if needed.
+func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		if len(names) == 0 {
+			o.manifests = append(o.manifests, manifest)
+		}
+		for _, name := range names {
+			mc := manifest
+			mc.Annotations = addNameAnnotation(name, manifest.Annotations)
+			o.manifests = append(o.manifests, mc)
+		}
+
+		return nil
+	}
+}
+
+// BlobFilter returns false if the blob should not be included in the archive.
+type BlobFilter func(ocispec.Descriptor) bool
+
+// WithBlobFilter specifies BlobFilter.
+func WithBlobFilter(f BlobFilter) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.blobRecordOptions.blobFilter = f
+		return nil
+	}
+}
+
+// WithSkipNonDistributableBlobs excludes non-distributable blobs such as Windows base layers.
+func WithSkipNonDistributableBlobs() ExportOpt {
+	f := func(desc ocispec.Descriptor) bool {
+		return !images.IsNonDistributable(desc.MediaType)
+	}
+	return WithBlobFilter(f)
+}
+
+// WithSkipMissing excludes blobs referenced by manifests if not all blobs
+// would be included in the archive.
+// The manifest itself is excluded only if it's not present locally.
+// This allows exporting multi-platform images if not all platforms are present
+// while still persisting the multi-platform index.
+func WithSkipMissing(store content.InfoReaderProvider) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.blobRecordOptions.childrenHandler = images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+			children, err := images.Children(ctx, store, desc)
+			if !images.IsManifestType(desc.MediaType) {
+				return children, err
+			}
+
+			if err != nil {
+				// If manifest itself is missing, skip it from export.
+				if errdefs.IsNotFound(err) {
+					return nil, images.ErrSkipDesc
+				}
+				return nil, err
+			}
+
+			// Don't export manifest descendants if any of them doesn't exist.
+			for _, child := range children {
+				exists, err := content.Exists(ctx, store, child)
+				if err != nil {
+					return nil, err
+				}
+
+				// If any child is missing, only export the manifest, but don't export its descendants.
+ if !exists { + return nil, nil + } + } + return children, nil + }) + return nil + } +} + +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v + } + + annotations[images.AnnotationImageName] = name + annotations[ocispec.AnnotationRefName] = ociReferenceName(name) + + return annotations +} + +func copySourceLabels(ctx context.Context, infoProvider content.InfoProvider, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + info, err := infoProvider.Info(ctx, desc.Digest) + if err != nil { + return desc, err + } + for k, v := range info.Labels { + if strings.HasPrefix(k, labels.LabelDistributionSource) { + if desc.Annotations == nil { + desc.Annotations = map[string]string{k: v} + } else { + desc.Annotations[k] = v + } + } + } + return desc, nil +} + +// Export implements Exporter. +func Export(ctx context.Context, store content.InfoReaderProvider, writer io.Writer, opts ...ExportOpt) error { + var eo exportOptions + for _, opt := range opts { + if err := opt(ctx, &eo); err != nil { + return err + } + } + + records := []tarRecord{ + ociLayoutFile(""), + } + + manifests := make([]ocispec.Descriptor, 0, len(eo.manifests)) + for _, desc := range eo.manifests { + d, err := copySourceLabels(ctx, store, desc) + if err != nil { + log.G(ctx).WithError(err).WithField("desc", desc).Warn("failed to copy distribution.source labels") + continue + } + manifests = append(manifests, d) + } + + algorithms := map[string]struct{}{} + dManifests := map[digest.Digest]*exportManifest{} + resolvedIndex := map[digest.Digest]digest.Digest{} + for _, desc := range manifests { + if images.IsManifestType(desc.MediaType) { + mt, ok := dManifests[desc.Digest] + if !ok { + // TODO(containerd): Skip if already added + r, err := getRecords(ctx, store, desc, algorithms, &eo.blobRecordOptions) + if err != nil { + return err + } + records = append(records, r...) + + mt = &exportManifest{ + manifest: desc, + } + dManifests[desc.Digest] = mt + } + + name := desc.Annotations[images.AnnotationImageName] + if name != "" { + mt.names = append(mt.names, name) + } + } else if images.IsIndexType(desc.MediaType) { + d, ok := resolvedIndex[desc.Digest] + if !ok { + if err := desc.Digest.Validate(); err != nil { + return err + } + records = append(records, blobRecord(store, desc, &eo.blobRecordOptions)) + + p, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return err + } + + var manifests []ocispec.Descriptor + for _, m := range index.Manifests { + if eo.platform != nil { + if m.Platform == nil || eo.platform.Match(*m.Platform) { + manifests = append(manifests, m) + } else if !eo.allPlatforms { + continue + } + } + + r, err := getRecords(ctx, store, m, algorithms, &eo.blobRecordOptions) + if err != nil { + return err + } + + records = append(records, r...) 
+ } + + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) + } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], + } + } else if eo.platform != nil { + return fmt.Errorf("no manifest found for platform: %w", errdefs.ErrNotFound) + } + resolvedIndex[desc.Digest] = d + } + if d != "" { + if name := desc.Annotations[images.AnnotationImageName]; name != "" { + mt := dManifests[d] + mt.names = append(mt.names, name) + } + + } + } else { + return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument) + } + } + + records = append(records, ociIndexRecord(manifests)) + + if !eo.skipDockerManifest && len(dManifests) > 0 { + tr, err := manifestsRecord(ctx, store, dManifests) + if err != nil { + return fmt.Errorf("unable to create manifests file: %w", err) + } + + records = append(records, tr) + } + + if len(algorithms) > 0 { + records = append(records, directoryRecord("blobs/", 0755)) + for alg := range algorithms { + records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) + } + } + + tw := tar.NewWriter(writer) + defer tw.Close() + return writeTar(ctx, tw, records) +} + +func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}, brOpts *blobRecordOptions) ([]tarRecord, error) { + var records []tarRecord + exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } + records = append(records, blobRecord(store, desc, brOpts)) + algorithms[desc.Digest.Algorithm().String()] = struct{}{} + return nil, nil + } + + childrenHandler := brOpts.childrenHandler + if childrenHandler == nil { + childrenHandler = images.ChildrenHandler(store) + } + + handlers := images.Handlers( + childrenHandler, + images.HandlerFunc(exportHandler), + ) + + // Walk sequentially since the number of fetches is likely one and doing in + // parallel requires locking the export handler + if err := images.Walk(ctx, handlers, desc); err != nil { + return nil, err + } + + return records, nil +} + +type tarRecord struct { + Header *tar.Header + CopyTo func(context.Context, io.Writer) (int64, error) +} + +type blobRecordOptions struct { + blobFilter BlobFilter + childrenHandler images.HandlerFunc +} + +func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOptions) tarRecord { + if opts != nil && opts.blobFilter != nil && !opts.blobFilter(desc) { + return tarRecord{} + } + return tarRecord{ + Header: &tar.Header{ + Name: path.Join(ocispec.ImageBlobsDir, desc.Digest.Algorithm().String(), desc.Digest.Encoded()), + Mode: 0444, + Size: desc.Size, + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + r, err := cs.ReaderAt(ctx, desc) + if err != nil { + return 0, fmt.Errorf("failed to get reader: %w", err) + } + defer r.Close() + + // Verify digest + dgstr := desc.Digest.Algorithm().Digester() + + n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) + if err != nil { + return 0, fmt.Errorf("failed to copy to tar: %w", err) + } + if dgstr.Digest() != desc.Digest { + return 0, fmt.Errorf("unexpected digest %s copied", dgstr.Digest()) + } + return n, nil + }, + } +} + +func 
directoryRecord(name string, mode int64) tarRecord { + return tarRecord{ + Header: &tar.Header{ + Name: name, + Mode: mode, + Typeflag: tar.TypeDir, + }, + } +} + +func ociLayoutFile(version string) tarRecord { + if version == "" { + version = ocispec.ImageLayoutVersion + } + layout := ocispec.ImageLayout{ + Version: version, + } + + b, err := json.Marshal(layout) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: ocispec.ImageLayoutFile, + Mode: 0444, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } + +} + +func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord { + index := ocispec.Index{ + Versioned: ocispecs.Versioned{ + SchemaVersion: 2, + }, + MediaType: ocispec.MediaTypeImageIndex, + Manifests: manifests, + } + + b, err := json.Marshal(index) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: ocispec.ImageIndexFile, + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } +} + +type exportManifest struct { + manifest ocispec.Descriptor + names []string +} + +func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) { + mfsts := make([]struct { + Config string + RepoTags []string + Layers []string + }, len(manifests)) + + var i int + for _, m := range manifests { + p, err := content.ReadBlob(ctx, store, m.manifest) + if err != nil { + return tarRecord{}, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return tarRecord{}, err + } + + dgst := manifest.Config.Digest + if err := dgst.Validate(); err != nil { + return tarRecord{}, err + } + mfsts[i].Config = path.Join(ocispec.ImageBlobsDir, dgst.Algorithm().String(), dgst.Encoded()) + for _, l := range manifest.Layers { + mfsts[i].Layers = append(mfsts[i].Layers, path.Join(ocispec.ImageBlobsDir, l.Digest.Algorithm().String(), l.Digest.Encoded())) + } + + for _, name := range m.names { + nname, err := familiarizeReference(name) + if err != nil { + return tarRecord{}, err + } + + mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname) + } + + i++ + } + + b, err := json.Marshal(mfsts) + if err != nil { + return tarRecord{}, err + } + + return tarRecord{ + Header: &tar.Header{ + Name: "manifest.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + }, nil +} + +func writeTar(ctx context.Context, tw *tar.Writer, recordsWithEmpty []tarRecord) error { + var records []tarRecord + for _, r := range recordsWithEmpty { + if r.Header != nil { + records = append(records, r) + } + } + sort.Slice(records, func(i, j int) bool { + return records[i].Header.Name < records[j].Header.Name + }) + + var last string + for _, record := range records { + if record.Header.Name == last { + continue + } + last = record.Header.Name + if err := tw.WriteHeader(record.Header); err != nil { + return err + } + if record.CopyTo != nil { + n, err := record.CopyTo(ctx, tw) + if err != nil { + return err + } + if n != record.Header.Size { + return fmt.Errorf("unexpected copy size for %s", record.Header.Name) + } + } else if record.Header.Size > 0 { + return fmt.Errorf("no content to write to 
record with non-zero size for %s", record.Header.Name) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go new file mode 100644 index 00000000..64a1587f --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/importer.go @@ -0,0 +1,420 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package archive provides a Docker and OCI compatible importer +package archive + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "path" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/pkg/archive/compression" + "github.com/containerd/containerd/v2/pkg/labels" + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/containerd/platforms" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + +// ImportIndex imports an index from a tar archive image bundle +// - implements Docker v1.1, v1.2 and OCI v1. +// - prefers OCI v1 when provided +// - creates OCI index for Docker formats +// - normalizes Docker references and adds as OCI ref name +// e.g. 
alpine:latest -> docker.io/library/alpine:latest +// - existing OCI reference names are untouched +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { + var ( + tr = tar.NewReader(reader) + + ociLayout ocispec.ImageLayout + mfsts []struct { + Config string + RepoTags []string + Layers []string + } + symlinks = make(map[string]string) + blobs = make(map[string]ocispec.Descriptor) + iopts importOpts + ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return ocispec.Descriptor{}, err + } + if hdr.Typeflag == tar.TypeSymlink { + symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname) + } + + //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA + if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA { + if hdr.Typeflag != tar.TypeDir { + log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored") + } + continue + } + + hdrName := path.Clean(hdr.Name) + if hdrName == ocispec.ImageLayoutFile { + if err = onUntarJSON(tr, &ociLayout); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("untar oci layout %q: %w", hdr.Name, err) + } + } else if hdrName == "manifest.json" { + if err = onUntarJSON(tr, &mfsts); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("untar manifest %q: %w", hdr.Name, err) + } + } else { + dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to ingest %q: %w", hdr.Name, err) + } + + blobs[hdrName] = ocispec.Descriptor{ + Digest: dgst, + Size: hdr.Size, + } + } + } + + // If OCI layout was given, interpret the tar as an OCI layout. + // When not provided, the layout of the tar will be interpreted + // as Docker v1.1 or v1.2. 
+ if ociLayout.Version != "" { + if ociLayout.Version != ocispec.ImageLayoutVersion { + return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version) + } + + idx, ok := blobs[ocispec.ImageIndexFile] + if !ok { + return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion) + } + + idx.MediaType = ocispec.MediaTypeImageIndex + return idx, nil + } + + if mfsts == nil { + return ocispec.Descriptor{}, errors.New("unrecognized image format") + } + + for name, linkname := range symlinks { + desc, ok := blobs[linkname] + if !ok { + return ocispec.Descriptor{}, fmt.Errorf("no target for symlink layer from %q to %q", name, linkname) + } + blobs[name] = desc + } + + idx := ocispec.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + } + for _, mfst := range mfsts { + config, ok := blobs[mfst.Config] + if !ok { + return ocispec.Descriptor{}, fmt.Errorf("image config %q not found", mfst.Config) + } + config.MediaType = images.MediaTypeDockerSchema2Config + + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to resolve layers: %w", err) + } + + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, + } + + desc, err := writeManifest(ctx, store, manifest, manifest.MediaType) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("write docker manifest: %w", err) + } + + imgPlatforms, err := images.Platforms(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("unable to resolve platform: %w", err) + } + if len(imgPlatforms) > 0 { + // Only one platform can be resolved from non-index manifest, + // The platform can only come from the config included above, + // if the config has no platform it can be safely omitted. + desc.Platform = &imgPlatforms[0] + + // If the image we've just imported is a Windows image without the OSVersion set, + // we could just assume it matches this host's OS Version. Without this, the + // children labels might not be set on the image content, leading to it being + // garbage collected, breaking the image. 
+ // See: https://github.com/containerd/containerd/issues/5690 + if desc.Platform.OS == "windows" && desc.Platform.OSVersion == "" { + platform := platforms.DefaultSpec() + desc.Platform.OSVersion = platform.OSVersion + } + } + + if len(mfst.RepoTags) == 0 { + idx.Manifests = append(idx.Manifests, desc) + } else { + // Add descriptor per tag + for _, ref := range mfst.RepoTags { + mfstdesc := desc + + normalized, err := normalizeReference(ref) + if err != nil { + return ocispec.Descriptor{}, err + } + + mfstdesc.Annotations = map[string]string{ + images.AnnotationImageName: normalized, + ocispec.AnnotationRefName: ociReferenceName(normalized), + } + + idx.Manifests = append(idx.Manifests, mfstdesc) + } + } + } + + return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex) +} + +const ( + kib = 1024 + mib = 1024 * kib + jsonLimit = 20 * mib +) + +func onUntarJSON(r io.Reader, j interface{}) error { + return json.NewDecoder(io.LimitReader(r, jsonLimit)).Decode(j) +} + +func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) { + dgstr := digest.Canonical.Digester() + + if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil { + return "", err + } + + return dgstr.Digest(), nil +} + +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + filters := make([]string, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + for i, f := range layerFiles { + desc, ok := blobs[f] + if !ok { + return nil, fmt.Errorf("layer %q not found", f) + } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters[i] = fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String()) + } + + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels[labels.LabelUncompressed] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.Digest = info.Digest + desc.Size = info.Size + mediaType, err := detectLayerMediaType(ctx, store, *desc) + if err != nil { + return fmt.Errorf("failed to detect media type of layer: %w", err) + } + desc.MediaType = mediaType + } + } + return nil + }, filters...) 
+ if err != nil { + return nil, fmt.Errorf("failure checking for compressed blobs: %w", err) + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } + // Open blob, resolve media type + ra, err := store.ReaderAt(ctx, desc) + if err != nil { + return nil, fmt.Errorf("failed to open %q (%s): %w", layerFiles[i], desc.Digest, err) + } + s, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + ra.Close() + return nil, fmt.Errorf("failed to detect compression for %q: %w", layerFiles[i], err) + } + if s.GetCompression() == compression.Uncompressed { + if compress { + if err := desc.Digest.Validate(); err != nil { + return nil, err + } + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + labels.LabelUncompressed: desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + ra.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } + s.Close() + ra.Close() + } + return layers, nil +} + +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to open writer: %w", err) + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to truncate writer: %w", err) + } + + cw, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to get writer status: %w", err) + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, fmt.Errorf("failed to commit: %w", err) + } + } + + return desc, nil +} + +func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { + manifestBytes, err := json.Marshal(manifest) + if err != nil { + return ocispec.Descriptor{}, err + } + + desc := ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest.FromBytes(manifestBytes), + Size: int64(len(manifestBytes)), + } + if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil { + return ocispec.Descriptor{}, err + } + + return desc, nil +} + +func detectLayerMediaType(ctx context.Context, store content.Store, desc ocispec.Descriptor) (string, error) { + var mediaType string + // need to parse existing blob to use the proper media type + bytes := make([]byte, 10) + ra, err := store.ReaderAt(ctx, desc) + if err != nil { + return "", fmt.Errorf("failed to read content store to detect layer media type: %w", err) + } + defer ra.Close() + _, err = ra.ReadAt(bytes, 0) + if err != nil && err != io.EOF { + return "", fmt.Errorf("failed to read header 
bytes from layer to detect media type: %w", err)
+	}
+	if err == io.EOF {
+		// in the case of an empty layer, the media type should be uncompressed
+		return images.MediaTypeDockerSchema2Layer, nil
+	}
+	switch c := compression.DetectCompression(bytes); c {
+	case compression.Uncompressed:
+		mediaType = images.MediaTypeDockerSchema2Layer
+	default:
+		mediaType = images.MediaTypeDockerSchema2LayerGzip
+	}
+	return mediaType, nil
+}
diff --git a/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go b/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go
new file mode 100644
index 00000000..5e12eb98
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/core/images/archive/reference.go
@@ -0,0 +1,114 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containerd/containerd/v2/pkg/reference"
+	distref "github.com/distribution/reference"
+	"github.com/opencontainers/go-digest"
+)
+
+// FilterRefPrefix restricts references to having the given image
+// prefix. Tag-only references will have the prefix prepended.
+func FilterRefPrefix(image string) func(string) string {
+	return refTranslator(image, true)
+}
+
+// AddRefPrefix prepends the given image prefix to tag-only references,
+// while returning full references unmodified.
+func AddRefPrefix(image string) func(string) string {
+	return refTranslator(image, false)
+}
+
+// refTranslator creates a reference which only has a tag or verifies
+// a full reference.
+func refTranslator(image string, checkPrefix bool) func(string) string {
+	return func(ref string) string {
+		if image == "" {
+			return ""
+		}
+		// Check if ref is full reference
+		if strings.ContainsAny(ref, "/:@") {
+			// If not prefixed, don't include image
+			if checkPrefix && !isImagePrefix(ref, image) {
+				return ""
+			}
+			return ref
+		}
+		return image + ":" + ref
+	}
+}
+
+func isImagePrefix(s, prefix string) bool {
+	if !strings.HasPrefix(s, prefix) {
+		return false
+	}
+	if len(s) > len(prefix) {
+		switch s[len(prefix)] {
+		case '/', ':', '@':
+			// Prevent matching partial namespaces
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+func normalizeReference(ref string) (string, error) {
+	normalized, err := distref.ParseDockerRef(ref)
+	if err != nil {
+		return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
+	}
+
+	return normalized.String(), nil
+}
+
+func familiarizeReference(ref string) (string, error) {
+	named, err := distref.ParseNormalizedNamed(ref)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse %q: %w", ref, err)
+	}
+	named = distref.TagNameOnly(named)
+
+	return distref.FamiliarString(named), nil
+}
+
+func ociReferenceName(name string) string {
+	// OCI defines the reference name as only a tag excluding the
+	// repository.
The containerd annotation contains the full image name + // since the tag is insufficient for correctly naming and referring to an + // image + var ociRef string + if spec, err := reference.Parse(name); err == nil { + ociRef = spec.Object + } else { + ociRef = name + } + + return ociRef +} + +// DigestTranslator creates a digest reference by adding the +// digest to an image name +func DigestTranslator(prefix string) func(digest.Digest) string { + return func(dgst digest.Digest) string { + return prefix + "@" + dgst.String() + } +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/diffid.go b/vendor/github.com/containerd/containerd/v2/core/images/diffid.go new file mode 100644 index 00000000..140d2c07 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/diffid.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/pkg/archive/compression" + "github.com/containerd/containerd/v2/pkg/labels" + "github.com/containerd/log" +) + +// GetDiffID gets the diff ID of the layer blob descriptor. 
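Editorial aside, not part of the vendored file: a brief sketch of how GetDiffID, defined just below, is typically consumed. The printDiffIDs helper and its manifest argument are hypothetical; GetDiffID is the vendored API.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printDiffIDs resolves the uncompressed digest of every layer in a
// manifest, hitting the cached label fast path when the image has
// already been unpacked.
func printDiffIDs(ctx context.Context, cs content.Store, m ocispec.Manifest) error {
	for _, layer := range m.Layers {
		diffID, err := images.GetDiffID(ctx, cs, layer)
		if err != nil {
			return err
		}
		fmt.Printf("%s -> %s\n", layer.Digest, diffID)
	}
	return nil
}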
+func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { + switch desc.MediaType { + case + // If the layer is already uncompressed, we can just return its digest + MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerForeign, + ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated + return desc.Digest, nil + } + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return "", err + } + v, ok := info.Labels[labels.LabelUncompressed] + if ok { + // Fast path: if the image is already unpacked, we can use the label value + return digest.Parse(v) + } + // if the image is not unpacked, we may not have the label + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return "", err + } + defer ra.Close() + r := content.NewReader(ra) + uR, err := compression.DecompressStream(r) + if err != nil { + return "", err + } + defer uR.Close() + digester := digest.Canonical.Digester() + hashW := digester.Hash() + if _, err := io.Copy(hashW, uR); err != nil { + return "", err + } + if err := ra.Close(); err != nil { + return "", err + } + digest := digester.Digest() + // memorize the computed value + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[labels.LabelUncompressed] = digest.String() + if _, err := cs.Update(ctx, info, "labels"); err != nil { + log.G(ctx).WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) + } + return digest, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/handlers.go b/vendor/github.com/containerd/containerd/v2/core/images/handlers.go new file mode 100644 index 00000000..0487fdeb --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/handlers.go @@ -0,0 +1,320 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/errdefs" + "github.com/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +var ( + // ErrSkipDesc is used to skip processing of a descriptor and + // its descendants. + ErrSkipDesc = errors.New("skip descriptor") + + // ErrStopHandler is used to signify that the descriptor + // has been handled and should not be handled further. + // This applies only to a single descriptor in a handler + // chain and does not apply to descendant descriptors. + ErrStopHandler = errors.New("stop handler") + + // ErrEmptyWalk is used when the WalkNotEmpty handlers return no + // children (e.g.: they were filtered out). 
+ ErrEmptyWalk = errors.New("image might be filtered out") +) + +// Handler handles image manifests +type Handler interface { + Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) +} + +// HandlerFunc function implementing the Handler interface +type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) + +// Handle image manifests +func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + return fn(ctx, desc) +} + +// Handlers returns a handler that will run the handlers in sequence. +// +// A handler may return `ErrStopHandler` to stop calling additional handlers +func Handlers(handlers ...Handler) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + var children []ocispec.Descriptor + for _, handler := range handlers { + ch, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Is(err, ErrStopHandler) { + break + } + return nil, err + } + + children = append(children, ch...) + } + + return children, nil + } +} + +// Walk the resources of an image and call the handler for each. If the handler +// decodes the sub-resources for each image, +// +// This differs from dispatch in that each sibling resource is considered +// synchronously. +func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { + for _, desc := range descs { + + children, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Is(err, ErrSkipDesc) { + continue // don't traverse the children. + } + return err + } + + if len(children) > 0 { + if err := Walk(ctx, handler, children...); err != nil { + return err + } + } + } + return nil +} + +// WalkNotEmpty works the same way Walk does, with the exception that it ensures that +// some children are still found by Walking the descriptors (for example, not all of +// them have been filtered out by one of the handlers). If there are no children, +// then an ErrEmptyWalk error is returned. +func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { + isEmpty := true + var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := handler.Handle(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + isEmpty = false + } + + return children, nil + } + + err := Walk(ctx, notEmptyHandler, descs...) + if err != nil { + return err + } + + if isEmpty { + return ErrEmptyWalk + } + + return nil +} + +// Dispatch runs the provided handler for content specified by the descriptors. +// If the handler decode subresources, they will be visited, as well. +// +// Handlers for siblings are run in parallel on the provided descriptors. A +// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse +// any children. +// +// A concurrency limiter can be passed in to limit the number of concurrent +// handlers running. When limiter is nil, there is no limit. +// +// Typically, this function will be used with `FetchHandler`, often composed +// with other handlers. +// +// If any handler returns an error, the dispatch session will be canceled. 
+func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { + eg, ctx2 := errgroup.WithContext(ctx) + for _, desc := range descs { + desc := desc + + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } + + eg.Go(func() error { + desc := desc + + children, err := handler.Handle(ctx2, desc) + if limiter != nil { + limiter.Release(1) + } + if err != nil { + if errors.Is(err, ErrSkipDesc) { + return nil // don't traverse the children. + } + return err + } + + if len(children) > 0 { + return Dispatch(ctx2, handler, limiter, children...) + } + + return nil + }) + } + + return eg.Wait() +} + +// ChildrenHandler decodes well-known manifest types and returns their children. +// +// This is useful for supporting recursive fetch and other use cases where you +// want to do a full walk of resources. +// +// One can also replace this with another implementation to allow descending of +// arbitrary types. +func ChildrenHandler(provider content.Provider) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return SetChildrenMappedLabels(manager, f, nil) +} + +// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +// The label map allows the caller to control the labels per child descriptor. +// For returned labels, the index of the child will be appended to the end +// except for the first index when the returned label does not end with '.'. +func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { + if labelMap == nil { + labelMap = ChildGCLabels + } + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + var ( + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields = []string{} + keys = map[string]uint{} + ) + for _, ch := range children { + labelKeys := labelMap(ch) + for _, key := range labelKeys { + idx := keys[key] + keys[key] = idx + 1 + if idx > 0 || key[len(key)-1] == '.' { + key = fmt.Sprintf("%s%d", key, idx) + } + + info.Labels[key] = ch.Digest.String() + fields = append(fields, "labels."+key) + } + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatforms is a handler wrapper which limits the descriptors returned +// based on matching the specified platform matcher. 
+func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + + if m == nil { + descs = children + } else { + for _, d := range children { + if d.Platform == nil || m.Match(*d.Platform) { + descs = append(descs, d) + } + } + } + + return descs, nil + } +} + +// LimitManifests is a handler wrapper which filters the manifest descriptors +// returned using the provided platform. +// The results will be ordered according to the comparison operator and +// use the ordering in the manifests for equal matches. +// A limit of 0 or less is considered no limit. +// A not found error is returned if no manifest is matched. +func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + // only limit manifests from an index + if IsIndexType(desc.MediaType) { + sort.SliceStable(children, func(i, j int) bool { + if children[i].Platform == nil { + return false + } + if children[j].Platform == nil { + return true + } + return m.Less(*children[i].Platform, *children[j].Platform) + }) + + if n > 0 { + if len(children) == 0 { + return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) + } + if len(children) > n { + children = children[:n] + } + } + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/image.go b/vendor/github.com/containerd/containerd/v2/core/images/image.go new file mode 100644 index 00000000..9fcce9b4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/image.go @@ -0,0 +1,440 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Image provides the model for how containerd views container images. +type Image struct { + // Name of the image. + // + // To be pulled, it must be a reference compatible with resolvers. + // + // This field is required. + Name string + + // Labels provide runtime decoration for the image record. + // + // There is no default behavior for how these labels are propagated. They + // only decorate the static metadata object. + // + // This field is optional. + Labels map[string]string + + // Target describes the root content for this image. Typically, this is + // a manifest, index or manifest list. 
+ Target ocispec.Descriptor + + CreatedAt, UpdatedAt time.Time +} + +// DeleteOptions provide options on image delete +type DeleteOptions struct { + Synchronous bool + Target *ocispec.Descriptor +} + +// DeleteOpt allows configuring a delete operation +type DeleteOpt func(context.Context, *DeleteOptions) error + +// SynchronousDelete is used to indicate that an image deletion and removal of +// the image resources should occur synchronously before returning a result. +func SynchronousDelete() DeleteOpt { + return func(ctx context.Context, o *DeleteOptions) error { + o.Synchronous = true + return nil + } +} + +// DeleteTarget is used to specify the target value an image is expected +// to have when deleting. If the image has a different target, then +// NotFound is returned. +func DeleteTarget(target *ocispec.Descriptor) DeleteOpt { + return func(ctx context.Context, o *DeleteOptions) error { + o.Target = target + return nil + } +} + +// Store and interact with images +type Store interface { + Get(ctx context.Context, name string) (Image, error) + List(ctx context.Context, filters ...string) ([]Image, error) + Create(ctx context.Context, image Image) (Image, error) + + // Update will replace the data in the store with the provided image. If + // one or more fieldpaths are provided, only those fields will be updated. + Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) + + Delete(ctx context.Context, name string, opts ...DeleteOpt) error +} + +// TODO(stevvooe): Many of these functions make strong platform assumptions, +// which are untrue in a lot of cases. More refactoring must be done here to +// make this work in all cases. + +// Config resolves the image configuration descriptor. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + return Config(ctx, provider, image.Target, platform) +} + +// RootFS returns the unpacked diffids that make up an image's rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { + desc, err := image.Config(ctx, provider, platform) + if err != nil { + return nil, err + } + return RootFS(ctx, provider, desc) +} + +// Size returns the total size of an image's packed resources. +func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { + var size int64 + return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Size < 0 { + return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + } + size += desc.Size + return nil, nil + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) +} + +type platformManifest struct { + p *ocispec.Platform + m *ocispec.Manifest +} + +// Manifest resolves a manifest from the image for the given platform. +// +// When a manifest descriptor inside of a manifest index does not have +// a platform defined, the platform from the image config is considered. +// +// If the descriptor points to a non-index manifest, then the manifest is +// unmarshalled and returned without considering the platform inside of the +// config.
+// +// TODO(stevvooe): This violates the current platform agnostic approach to this +// package by returning a specific manifest type. We'll need to refactor this +// to return a manifest descriptor or decide that we want to bring the API in +// this direction because this abstraction is not needed. +func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { + var ( + limit = 1 + m []platformManifest + wasIndex bool + ) + + if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if IsManifestType(desc.MediaType) { + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if desc.Digest != image.Digest && platform != nil { + if desc.Platform != nil && !platform.Match(*desc.Platform) { + return nil, nil + } + + if desc.Platform == nil { + imagePlatform, err := ConfigPlatform(ctx, provider, manifest.Config) + if err != nil { + return nil, err + } + if !platform.Match(imagePlatform) { + return nil, nil + } + + } + } + + m = append(m, platformManifest{ + p: desc.Platform, + m: &manifest, + }) + + return nil, nil + } else if IsIndexType(desc.MediaType) { + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + if platform == nil { + return idx.Manifests, nil + } + + var descs []ocispec.Descriptor + for _, d := range idx.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + + wasIndex = true + + if len(descs) > limit { + return descs[:limit], nil + } + return descs, nil + } + return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound) + }), image); err != nil { + return ocispec.Manifest{}, err + } + + if len(m) == 0 { + err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound) + if wasIndex { + err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound) + } + return ocispec.Manifest{}, err + } + return *m[0].m, nil +} + +// Config resolves the image configuration descriptor using a content provided +// to resolve child resources on the image. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + manifest, err := Manifest(ctx, provider, image, platform) + if err != nil { + return ocispec.Descriptor{}, err + } + return manifest.Config, nil +} + +// Platforms returns one or more platforms supported by the image. 
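Taken together, Manifest and Config are the usual entry points for resolving platform-specific content. A brief, hedged sketch, assuming a content.Provider `provider` and an image's target descriptor `target` (names illustrative, not from this diff):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// describeImage resolves the manifest and config descriptor for the host
// platform and reports the layer count.
func describeImage(ctx context.Context, provider content.Provider, target ocispec.Descriptor) error {
	m, err := images.Manifest(ctx, provider, target, platforms.Default())
	if err != nil {
		return err
	}
	cfg, err := images.Config(ctx, provider, target, platforms.Default())
	if err != nil {
		return err
	}
	fmt.Printf("config %s, %d layers\n", cfg.Digest, len(m.Layers))
	return nil
}
```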
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { + var platformSpecs []ocispec.Platform + return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Platform != nil { + if desc.Platform.OS == "unknown" || desc.Platform.Architecture == "unknown" { + return nil, ErrSkipDesc + } + platformSpecs = append(platformSpecs, *desc.Platform) + return nil, ErrSkipDesc + } + + if IsConfigType(desc.MediaType) { + imagePlatform, err := ConfigPlatform(ctx, provider, desc) + if err != nil { + return nil, err + } + platformSpecs = append(platformSpecs, imagePlatform) + } + return nil, nil + }), ChildrenHandler(provider)), image) +} + +// Check returns nil if all components of an image are available in the +// provider for the specified platform. +// +// If available is true, the caller can assume that required represents the +// complete set of content required for the image. +// +// missing will have the components that are part of required but not available +// in the provider. +// +// If there is a problem resolving content, an error will be returned. +func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { + mfst, err := Manifest(ctx, provider, image, platform) + if err != nil { + if errdefs.IsNotFound(err) { + return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil + } + + return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) + } + + // TODO(stevvooe): It is possible that referenced components could have + // children, but this is rare. For now, we ignore this and only verify + // that manifest components are present. + required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) + + for _, desc := range required { + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + if errdefs.IsNotFound(err) { + missing = append(missing, desc) + continue + } else { + return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err) + } + } + ra.Close() + present = append(present, desc) + + } + + return true, required, present, missing, nil +} + +// Children returns the immediate children of content described by the descriptor. +func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if IsManifestType(desc.MediaType) { + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version.
+ var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil + } else if IsIndexType(desc.MediaType) { + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + return append([]ocispec.Descriptor{}, index.Manifests...), nil + } else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) && !IsAttestationType(desc.MediaType) { + // Layers, configs, and attestations are childless data types and should not be logged. + log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + return nil, nil +} + +// unknownDocument represents a manifest, manifest list, or index that has not +// yet been validated. +type unknownDocument struct { + MediaType string `json:"mediaType,omitempty"` + Config json.RawMessage `json:"config,omitempty"` + Layers json.RawMessage `json:"layers,omitempty"` + Manifests json.RawMessage `json:"manifests,omitempty"` + FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1 +} + +// validateMediaType returns an error if the byte slice is invalid JSON, +// if the format of the blob is not supported, or if the media type +// identifies the blob as one format, but it identifies itself as, or +// contains elements of another format. +func validateMediaType(b []byte, mt string) error { + var doc unknownDocument + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if len(doc.FSLayers) != 0 { + return fmt.Errorf("media-type: schema 1 not supported") + } + if IsManifestType(mt) && (len(doc.Manifests) != 0 || IsIndexType(doc.MediaType)) { + return fmt.Errorf("media-type: expected manifest but found index (%s)", mt) + } else if IsIndexType(mt) && (len(doc.Config) != 0 || len(doc.Layers) != 0 || IsManifestType(doc.MediaType)) { + return fmt.Errorf("media-type: expected index but found manifest (%s)", mt) + } + return nil +} + +// RootFS returns the unpacked diffids that make up an image's rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { + p, err := content.ReadBlob(ctx, provider, configDesc) + if err != nil { + return nil, err + } + + var config ocispec.Image + if err := json.Unmarshal(p, &config); err != nil { + return nil, err + } + return config.RootFS.DiffIDs, nil +} + +// ConfigPlatform returns a normalized platform from an image manifest config. +func ConfigPlatform(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) (ocispec.Platform, error) { + p, err := content.ReadBlob(ctx, provider, configDesc) + if err != nil { + return ocispec.Platform{}, err + } + + // Technically, this should be ocispec.Image, but we only need the + // ocispec.Platform that is embedded in the image struct.
+ var imagePlatform ocispec.Platform + if err := json.Unmarshal(p, &imagePlatform); err != nil { + return ocispec.Platform{}, err + } + return platforms.Normalize(imagePlatform), nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/importexport.go b/vendor/github.com/containerd/containerd/v2/core/images/importexport.go new file mode 100644 index 00000000..601d545e --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/importexport.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/v2/core/content" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Importer is the interface for image importer. +type Importer interface { + // Import imports an image from a tar stream. + Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) +} + +// Exporter is the interface for image exporter. +type Exporter interface { + // Export exports an image to a tar stream. + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error +} diff --git a/vendor/github.com/containerd/containerd/v2/core/images/labels.go b/vendor/github.com/containerd/containerd/v2/core/images/labels.go new file mode 100644 index 00000000..06dfed57 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/labels.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" +) diff --git a/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go b/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go new file mode 100644 index 00000000..0c8600d8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go @@ -0,0 +1,235 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// mediatype definitions for image components handled in containerd. +// +// oci components are generally referenced directly, although we may centralize +// here for clarity. +const ( + MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" + MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd" + MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" + MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + + // Checkpoint/Restore Media Types + + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" + + // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest + MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + + // Encrypted media types + + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" + MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" + + // In-toto attestation + MediaTypeInToto = "application/vnd.in-toto+json" +) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. 
+ return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case MediaTypeDockerSchema2LayerZstd: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "zstd", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + case "zstd": + return "zstd", nil + } + } + return "", nil + default: + return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (mediaType string, suffixes []string) { + if mt == "" { + return "", []string{} + } + mediaType, ext, ok := strings.Cut(mt, "+") + if !ok { + return mediaType, []string{} + } + + // Splitting the extensions following the mediatype "(+)gzip+encrypted". + // We expect this to be a limited list, so add an arbitrary limit (50). + // + // Note that DiffCompression is only using the last element, so perhaps we + // should split on the last "+" only. + suffixes = strings.SplitN(ext, "+", 50) + sort.Strings(suffixes) + return mediaType, suffixes +} + +// IsNonDistributable returns true if the media type is non-distributable. +func IsNonDistributable(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || + strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") +} + +// IsLayerType returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + switch base, _ := parseMediaTypes(mt); base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2LayerZstd: + return true + } + return false +} + +// IsDockerType returns true if the media type has "application/vnd.docker." prefix +func IsDockerType(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.docker.") +} + +// IsManifestType returns true if the media type is an OCI-compatible manifest. +// No support for schema1 manifest. +func IsManifestType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return true + default: + return false + } +} + +// IsIndexType returns true if the media type is an OCI-compatible index. +func IsIndexType(mt string) bool { + switch mt { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + return true + default: + return false + } +} + +// IsConfigType returns true if the media type is an OCI-compatible image config. +// No support for containerd checkpoint configs. 
+func IsConfigType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + return true + default: + return false + } +} + +// IsKnownConfig returns true if the media type is a known config type, +// including containerd checkpoint configs +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} + +// IsAttestationType returns true if the media type is an attestation type +func IsAttestationType(mt string) bool { + switch mt { + case MediaTypeInToto: + return true + default: + return false + } +} + +// ChildGCLabels returns the label for a given descriptor to reference it +func ChildGCLabels(desc ocispec.Descriptor) []string { + mt := desc.MediaType + if IsKnownConfig(mt) { + return []string{"containerd.io/gc.ref.content.config"} + } + + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return []string{"containerd.io/gc.ref.content.m."} + } + + if IsLayerType(mt) { + return []string{"containerd.io/gc.ref.content.l."} + } + + return []string{"containerd.io/gc.ref.content."} +} + +// ChildGCLabelsFilterLayers returns the labels for a given descriptor to +// reference it, skipping layer media types +func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { + if IsLayerType(desc.MediaType) { + return nil + } + return ChildGCLabels(desc) +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go new file mode 100644 index 00000000..16ea609a --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go @@ -0,0 +1,234 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors" + "github.com/containerd/containerd/v2/pkg/tracing" + "github.com/containerd/containerd/v2/version" + "github.com/containerd/log" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. 
+ ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +// GenerateTokenOptions generates options for fetching a token based on a challenge +func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { + realm, ok := c.Parameters["realm"] + if !ok { + return TokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err) + } + + to := TokenOptions{ + Realm: realmURL.String(), + Service: c.Parameters["service"], + Username: username, + Secret: secret, + } + + scope, ok := c.Parameters["scope"] + if ok { + to.Scopes = append(to.Scopes, strings.Split(scope, " ")...) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + return to, nil +} + +// TokenOptions are options for requesting a token +type TokenOptions struct { + Realm string + Service string + Scopes []string + Username string + Secret string + + // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token. + // + // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request. + // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token + // + // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request. + // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token + FetchRefreshToken bool +} + +// OAuthTokenResponse is response from fetching token with a OAuth POST request +type OAuthTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +// FetchTokenWithOAuth fetches a token using a POST request +func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { + c := *client + client = &c + tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchTokenWithOAuth")) + + form := url.Values{} + if len(to.Scopes) > 0 { + form.Set("scope", strings.Join(to.Scopes, " ")) + } + form.Set("service", to.Service) + form.Set("client_id", clientID) + + if to.Username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.Secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.Username) + form.Set("password", to.Secret) + } + if to.FetchRefreshToken { + form.Set("access_type", "offline") + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) 
+ } + if len(req.Header.Get("User-Agent")) == 0 { + req.Header.Set("User-Agent", "containerd/"+version.Version) + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, remoteserrors.NewUnexpectedStatusErr(resp) + } + + decoder := json.NewDecoder(resp.Body) + + var tr OAuthTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, fmt.Errorf("unable to decode token response: %w", err) + } + + if tr.AccessToken == "" { + return nil, ErrNoToken + } + + return &tr, nil +} + +// FetchTokenResponse is response from fetching token with GET request +type FetchTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresInSeconds int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// FetchToken fetches a token using a GET request +func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { + c := *client + client = &c + tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchToken")) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil) + if err != nil { + return nil, err + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + if len(req.Header.Get("User-Agent")) == 0 { + req.Header.Set("User-Agent", "containerd/"+version.Version) + } + + reqParams := req.URL.Query() + + if to.Service != "" { + reqParams.Add("service", to.Service) + } + + for _, scope := range to.Scopes { + reqParams.Add("scope", scope) + } + + if to.Secret != "" { + req.SetBasicAuth(to.Username, to.Secret) + } + + if to.FetchRefreshToken { + reqParams.Add("offline_token", "true") + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, remoteserrors.NewUnexpectedStatusErr(resp) + } + + decoder := json.NewDecoder(resp.Body) + + var tr FetchTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, fmt.Errorf("unable to decode token response: %w", err) + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return nil, ErrNoToken + } + + return &tr, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go new file mode 100644 index 00000000..6ca93515 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/parse.go @@ -0,0 +1,200 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package auth + +import ( + "net/http" + "sort" + "strings" +) + +// AuthenticationScheme defines scheme of the authentication method +type AuthenticationScheme byte + +const ( + // BasicAuth is scheme for Basic HTTP Authentication RFC 7617 + BasicAuth AuthenticationScheme = 1 << iota + // DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616 + DigestAuth + // BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750 + BearerAuth +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // scheme is the auth-scheme according to RFC 2617 + Scheme AuthenticationScheme + + // parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +type byScheme []Challenge + +func (bs byScheme) Len() int { return len(bs) } +func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } + +// Less sorts in priority order: token > digest > basic +func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = <any 8-bit sequence of data> + // CHAR = <any US-ASCII character (octets 0 - 127)> + // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> + // CR = <US-ASCII CR, carriage return (13)> + // LF = <US-ASCII LF, linefeed (10)> + // SP = <US-ASCII SP, space (32)> + // HT = <US-ASCII HT, horizontal-tab (9)> + // <"> = <US-ASCII double-quote mark (34)> + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = <any OCTET except CTLs, but including LWS> + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1*<any CHAR except CTLs or separators> + // qdtext = <any TEXT except <">> + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ParseAuthHeader parses challenges from WWW-Authenticate header +func ParseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + var s AuthenticationScheme + switch v { + case "basic": + s = BasicAuth + case "digest": + s = DigestAuth + case "bearer": + s = BearerAuth + default: + continue + } + challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) + } + sort.Stable(byScheme(challenges)) + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + for { + var pkey string + pkey, s = expectToken(skipSpace(s)) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + return + } + s = s[1:] + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte,
len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go new file mode 100644 index 00000000..01fc792f --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go @@ -0,0 +1,378 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/v2/core/remotes/docker/auth" + remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors" + "github.com/containerd/errdefs" + "github.com/containerd/log" +) + +type dockerAuthorizer struct { + credentials func(string) (string, string, error) + + client *http.Client + header http.Header + mu sync.RWMutex + + // indexed by host name + handlers map[string]*authHandler + + onFetchRefreshToken OnFetchRefreshToken +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header + onFetchRefreshToken OnFetchRefreshToken +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client + } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// OnFetchRefreshToken is called on fetching request token. +type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request) + +// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token"). +func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.onFetchRefreshToken = f + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. 
+// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + + return &dockerAuthorizer{ + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + onFetchRefreshToken: ao.onFetchRefreshToken, + } +} + +// Authorize handles auth request. +func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil + } + + auth, refreshToken, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) + + if refreshToken != "" { + a.mu.RLock() + onFetchRefreshToken := a.onFetchRefreshToken + a.mu.RUnlock() + if onFetchRefreshToken != nil { + onFetchRefreshToken(ctx, refreshToken, req) + } + } + return nil +} + +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + +func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { + last := responses[len(responses)-1] + host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() + for _, c := range auth.ParseAuthHeader(last.Header) { + if c.Scheme == auth.BearerAuth { + if retry, err := invalidAuthorization(ctx, c, responses); err != nil { + delete(a.handlers, host) + return err + } else if retry { + delete(a.handlers, host) + } + + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service, + // and that the resource scope is the only part that + // differs between requests. + if _, ok := a.handlers[host]; ok { + return nil + } + + var username, secret string + if a.credentials != nil { + var err error + username, secret, err = a.credentials(host) + if err != nil { + return err + } + } + + common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) + if err != nil { + return err + } + common.FetchRefreshToken = a.onFetchRefreshToken != nil + + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) + return nil + } else if c.Scheme == auth.BasicAuth && a.credentials != nil { + username, secret, err := a.credentials(host) + if err != nil { + return err + } + + if username == "" || secret == "" { + return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization) + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{ + Username: username, + Secret: secret, + }) + return nil + } + } + return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented) +} + +// authResult is used to rate-limit token fetches: only one fetch job runs per +// scope at a time. +type authResult struct { + sync.WaitGroup + token string + refreshToken string + expirationTime *time.Time + err error +} + +// authHandler is used to handle auth requests per registry server.
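As an orientation for reviewers: a typical client feeds the 401 challenge response to the authorizer, then retries with the Authorization header it supplies. A hedged sketch (URL and credentials are placeholders; error handling trimmed to the essentials):

```go
package main

import (
	"context"
	"net/http"

	"github.com/containerd/containerd/v2/core/remotes/docker"
)

// authorizedGet retries a request once with credentials after an
// authentication challenge.
func authorizedGet(ctx context.Context, url string) (*http.Response, error) {
	authz := docker.NewDockerAuthorizer(
		docker.WithAuthCreds(func(host string) (string, string, error) {
			// Placeholder credentials; a real caller would consult a store.
			return "user", "secret", nil
		}),
	)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusUnauthorized {
		return resp, nil
	}
	resp.Body.Close()

	// Record the challenge, then let the authorizer decorate the retry.
	if err := authz.AddResponses(ctx, []*http.Response{resp}); err != nil {
		return nil, err
	}
	retry, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	if err := authz.Authorize(ctx, retry); err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(retry)
}
```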
+type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme auth.AuthenticationScheme + + // common contains common challenge answer + common auth.TokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, string, error) { + switch ah.scheme { + case auth.BasicAuth: + return ah.doBasicAuth(ctx) + case auth.BearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented) + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) { + username, secret := ah.common.Username, ah.common.Secret + + if username == "" || secret == "" { + return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), "", nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) { + // copy common tokenOptions + to := ah.common + + to.Scopes = GetTokenScopes(ctx, to.Scopes) + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.Scopes, " ") + + // Keep track of the expiration time of cached bearer tokens so they can be + // refreshed when they expire without a server roundtrip. + var expirationTime *time.Time + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist && (r.expirationTime == nil || r.expirationTime.After(time.Now())) { + ah.Unlock() + r.Wait() + return r.token, r.refreshToken, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.refreshToken, r.err, r.expirationTime = token, refreshToken, err, expirationTime + r.Done() + }() + + // fetch token for the resource scope + if to.Secret != "" { + defer func() { + if err != nil { + err = fmt.Errorf("failed to fetch oauth token: %w", err) + } + }() + // credential information is provided, use oauth POST endpoint + // TODO: Allow setting client_id + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) + if err != nil { + var errStatus remoteerrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. + // As of January 2022, ACR is known to return 400. 
+ if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 || errStatus.StatusCode == 400 { + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", "", err + } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) + return resp.Token, resp.RefreshToken, nil + } + log.G(ctx).WithFields(log.Fields{ + "status": errStatus.Status, + "body": string(errStatus.Body), + }).Debugf("token request failed") + } + return "", "", err + } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) + return resp.AccessToken, resp.RefreshToken, nil + } + // do request anonymously + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err) + } + expirationTime = getExpirationTime(resp.ExpiresInSeconds) + return resp.Token, resp.RefreshToken, nil +} + +func getExpirationTime(expiresInSeconds int) *time.Time { + if expiresInSeconds <= 0 { + return nil + } + expirationTime := time.Now().Add(time.Duration(expiresInSeconds) * time.Second) + return &expirationTime +} + +func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) { + errStr := c.Parameters["error"] + if errStr == "" { + return retry, nil + } + + n := len(responses) + if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { + limitedErr := errStr + errLengthLimit := 64 + if len(limitedErr) > errLengthLimit { + limitedErr = limitedErr[:errLengthLimit] + "..." + } + log.G(ctx).WithField("error", limitedErr).Debug("authorization error using bearer token, retrying") + return true, nil + } + + return retry, fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization) +} + +func sameRequest(r1, r2 *http.Request) bool { + if r1.Method != r2.Method { + return false + } + if *r1.URL != *r2.URL { + return false + } + return true +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go new file mode 100644 index 00000000..f21103e1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter.go @@ -0,0 +1,85 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" + "github.com/containerd/log" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// LegacyConfigMediaType should be replaced by OCI image spec. +// +// More detail: docker/distribution#1622 +const LegacyConfigMediaType = "application/octet-stream" + +// ConvertManifest changes application/octet-stream to schema2 config media type if needed. +// +// NOTE: +// 1.
original manifest will be deleted by next gc round. +// 2. don't cover manifest list. +func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + if !images.IsManifestType(desc.MediaType) { + log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) + return desc, nil + } + + // read manifest data + mb, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(mb, &manifest); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err) + } + + // check config media type + if manifest.Config.MediaType != LegacyConfigMediaType { + return desc, nil + } + + manifest.Config.MediaType = images.MediaTypeDockerSchema2Config + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err) + } + + // update manifest with gc labels + desc.Digest = digest.Canonical.FromBytes(data) + desc.Size = int64(len(data)) + + labels := map[string]string{} + for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err) + } + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go new file mode 100644 index 00000000..c97b6b89 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go @@ -0,0 +1,54 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "os" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/containerd/containerd/v2/plugins/content/local" + "github.com/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func FuzzConvertManifest(data []byte) int { + ctx := context.Background() + + // Do not log the message below + // level=warning msg="do nothing for media type: ..." 
+ log.G(ctx).Logger.SetLevel(log.PanicLevel) + + f := fuzz.NewConsumer(data) + desc := ocispec.Descriptor{} + err := f.GenerateStruct(&desc) + if err != nil { + return 0 + } + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + _, _ = ConvertManifest(ctx, cs, desc) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go new file mode 100644 index 00000000..8c623bcb --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go @@ -0,0 +1,283 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. 
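ErrorCode.Error above derives the human-readable form mechanically from the canonical Value, so the wire identifier doubles as the error string. A one-function sketch of that transformation (illustrative only, not the vendored API):

package main

import (
	"fmt"
	"strings"
)

// valueToErrorString mirrors ErrorCode.Error: lowercase the canonical
// value and turn underscores into spaces.
func valueToErrorString(value string) string {
	return strings.ToLower(strings.ReplaceAll(value, "_", " "))
}

func main() {
	for _, v := range []string{"UNAUTHORIZED", "MANIFEST_UNKNOWN", "TOOMANYREQUESTS"} {
		fmt.Printf("%s -> %q\n", v, valueToErrorString(v))
	}
}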
+func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. 
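WithArgs above treats the descriptor's Message as a printf template and produces a concrete Error per occurrence. A minimal sketch of that substitution, using invented values rather than registered descriptors:

package main

import "fmt"

// sketchError stands in for Error: a stable code plus a message that
// was produced by substituting arguments into the descriptor template.
type sketchError struct {
	Code    string
	Message string
}

func withArgs(code, template string, args ...interface{}) sketchError {
	return sketchError{Code: code, Message: fmt.Sprintf(template, args...)}
}

func main() {
	e := withArgs("BLOB_UNKNOWN", "blob unknown to registry: %s", "sha256:1111aaaa")
	fmt.Printf("%s: %s\n", e.Code, e.Message)
}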
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr := daErr.(type) { + case ErrorCode: + err = daErr.WithDetail(nil) + case Error: + err = daErr + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go new file mode 100644 index 00000000..b2bd4d82 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errdesc.go @@ -0,0 +1,154 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. 
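MarshalJSON and UnmarshalJSON above implement the registry's error envelope: a single "errors" array whose entries carry code, message, and optional detail. A sketch decoding a typical 4xx response body; the field names follow the distribution spec, the payload itself is invented:

package main

import (
	"encoding/json"
	"fmt"
)

type wireError struct {
	Code    string      `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`
}

// wireErrors is the envelope shape produced by Errors.MarshalJSON.
type wireErrors struct {
	Errors []wireError `json:"errors"`
}

func main() {
	body := `{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown","detail":{"Tag":"latest"}}]}`
	var we wireErrors
	if err := json.Unmarshal([]byte(body), &we); err != nil {
		panic(err)
	}
	for _, e := range we.Errors {
		fmt.Printf("%s: %s (detail: %v)\n", e.Code, e.Message, e.Detail)
	}
}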
+ ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. + ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, 
group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go new file mode 100644 index 00000000..2c7d880a --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go @@ -0,0 +1,358 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "compress/flate" + "compress/gzip" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type dockerFetcher struct { + *dockerBase +} + +func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, err + } + + return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { + // firstly try fetch via external urls + for _, us := range desc.URLs { + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debugf("failed to parse %q", us) + continue + } + if u.Scheme != "http" && u.Scheme != "https" { + log.G(ctx).Debug("non-http(s) alternative url is unsupported") + continue + } + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).Info("request") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try one of the other urls. 
+ } + + return nil, err + } + + return rc, nil + } + + // Try manifests endpoints for manifests types + if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) || + desc.MediaType == images.MediaTypeDockerSchema1Manifest { + + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + return nil, firstErr + } + + // Finally use blobs endpoints + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w", + desc.Digest, desc.MediaType, errdefs.ErrNotFound, + ) + } + + return nil, firstErr + + }) +} + +func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) { + headReq := r.request(host, http.MethodHead, ps...) + if err := headReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + + if mediatype == "" { + headReq.header.Set("Accept", "*/*") + } else { + headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } + + headResp, err := headReq.doWithRetries(ctx, nil) + if err != nil { + return nil, 0, err + } + if headResp.Body != nil { + headResp.Body.Close() + } + if headResp.StatusCode > 299 { + return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status) + } + + getReq := r.request(host, http.MethodGet, ps...) 
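createGetReq above probes with HEAD first (to confirm existence and learn Content-Length) before constructing the GET for the same path. A self-contained sketch of that probe order against a stand-in server; the path and digest are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	blob := []byte("example layer data")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// the handler stands in for a registry blob endpoint
		w.Header().Set("Content-Length", fmt.Sprint(len(blob)))
		if r.Method == http.MethodHead {
			return // HEAD: headers only
		}
		w.Write(blob)
	}))
	defer srv.Close()

	url := srv.URL + "/v2/library/demo/blobs/sha256:1111"
	head, err := http.Head(url)
	if err != nil {
		panic(err)
	}
	head.Body.Close()
	fmt.Println("HEAD says size:", head.ContentLength)

	get, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer get.Body.Close()
	data, _ := io.ReadAll(get.Body)
	fmt.Printf("GET returned %d bytes: %q\n", len(data), data)
}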
+ if err := getReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + return getReq, headResp.ContentLength, nil +} + +func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...remotes.FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error) { + var desc ocispec.Descriptor + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst)) + var config remotes.FetchByDigestConfig + for _, o := range opts { + if err := o(ctx, &config); err != nil { + return nil, desc, err + } + } + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, desc, err + } + + var ( + getReq *request + sz int64 + firstErr error + ) + + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + + if getReq == nil { + // Fall back to the "manifests" endpoint + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + } + + if getReq == nil { + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound) + } + if firstErr == nil { + firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst) + } + return nil, desc, firstErr + } + + seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) { + return r.open(ctx, getReq, config.Mediatype, offset) + }) + if err != nil { + return nil, desc, err + } + + desc = ocispec.Descriptor{ + MediaType: "application/octet-stream", + Digest: dgst, + Size: sz, + } + if config.Mediatype != "" { + desc.MediaType = config.Mediatype + } + return seeker, desc, nil +} + +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { + if mediatype == "" { + req.header.Set("Accept", "*/*") + } else { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } + req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5") + + if offset > 0 { + // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints + // will return the header without supporting the range. The content + // range must always be checked. + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + resp.Body.Close() + } + }() + + if resp.StatusCode > 299 { + // TODO(stevvooe): When doing a offset specific request, we should + // really distinguish between a 206 and a 200. In the case of 200, we + // can discard the bytes, hiding the seek behavior from the + // implementation. 
+ + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound) + } + var registryErr Errors + if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { + return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status) + } + return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) + } + if offset > 0 { + cr := resp.Header.Get("content-range") + if cr != "" { + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + return nil, fmt.Errorf("unhandled content range in response: %v", cr) + + } + } else { + // TODO: Should any cases where use of content range + // without the proper header be considered? + // 206 responses? + + // Discard up to offset + // Could use buffer pool here but this case should be rare + n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) + if err != nil { + return nil, fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return nil, errors.New("unable to discard to offset") + } + + } + } + + body := resp.Body + encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool { + return r == ' ' || r == '\t' || r == ',' + }) + for i := len(encoding) - 1; i >= 0; i-- { + algorithm := strings.ToLower(encoding[i]) + switch algorithm { + case "zstd": + r, err := zstd.NewReader(body) + if err != nil { + return nil, err + } + body = r.IOReadCloser() + case "gzip": + body, err = gzip.NewReader(body) + if err != nil { + return nil, err + } + case "deflate": + body = flate.NewReader(body) + case "identity", "": + // no content-encoding applied, use raw body + default: + return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm) + } + } + + return body, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go new file mode 100644 index 00000000..5da9dca7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go @@ -0,0 +1,75 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strconv" +) + +func FuzzFetcher(data []byte) int { + dataLen := len(data) + if dataLen == 0 { + return -1 + } + + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) + rw.Header().Set("content-length", strconv.Itoa(dataLen)) + rw.Write(data) + })) + defer s.Close() + + u, err := url.Parse(s.URL) + if err != nil { + return 0 + } + + f := dockerFetcher{&dockerBase{ + repository: "nonempty", + }} + host := RegistryHost{ + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + } + + ctx := context.Background() + req := f.request(host, http.MethodGet) + rc, err := f.open(ctx, req, "", 0) + if err != nil { + return 0 + } + b, err := io.ReadAll(rc) + if err != nil { + return 0 + } + + expected := data + if len(b) != len(expected) { + panic("len of request is not equal to len of expected but should be") + } + return 1 +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go new file mode 100644 index 00000000..615869a2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/handler.go @@ -0,0 +1,149 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/pkg/labels" + "github.com/containerd/containerd/v2/pkg/reference" + "github.com/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// AppendDistributionSourceLabel updates the label of blob with distribution source. +func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return nil, err + } + + source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + info, err := manager.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + key := distributionSourceLabelKey(source) + + originLabel := "" + if info.Labels != nil { + originLabel = info.Labels[key] + } + value := appendDistributionSourceLabel(originLabel, repo) + + // The repo name has been limited under 256 and the distribution + // label might hit the limitation of label size, when blob data + // is used as the very, very common layer. 
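appendDistributionSourceLabel below keeps the per-registry source label as a sorted, de-duplicated, comma-joined repository list (the vendored code does this with a hand-rolled insertion sort). The same semantics in a more conventional sketch:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// mergeSourceLabel merges one more repository into an existing
// "containerd.io/distribution.source.<registry>" label value.
func mergeSourceLabel(existing, repo string) string {
	var repos []string
	if existing != "" {
		repos = strings.Split(existing, ",")
	}
	repos = append(repos, repo)
	sort.Strings(repos)

	var out []string
	for _, r := range repos {
		if len(out) > 0 && out[len(out)-1] == r {
			continue // drop duplicates
		}
		out = append(out, r)
	}
	return strings.Join(out, ",")
}

func main() {
	label := mergeSourceLabel("library/busybox", "library/alpine")
	fmt.Println(label) // library/alpine,library/busybox
	fmt.Println(mergeSourceLabel(label, "library/busybox")) // unchanged
}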
+ if err := labels.Validate(key, value); err != nil { + log.G(ctx).Warnf("skip to append distribution label: %s", err) + return nil, nil + } + + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + key: value, + }, + } + _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key)) + return nil, err + }, nil +} + +func appendDistributionSourceLabel(originLabel, repo string) string { + repos := []string{} + if originLabel != "" { + repos = strings.Split(originLabel, ",") + } + repos = append(repos, repo) + + // use empty string to present duplicate items + for i := 1; i < len(repos); i++ { + tmp, j := repos[i], i-1 + for ; j >= 0 && repos[j] >= tmp; j-- { + if repos[j] == tmp { + tmp = "" + } + repos[j+1] = repos[j] + } + repos[j+1] = tmp + } + + i := 0 + for ; i < len(repos) && repos[i] == ""; i++ { + } + + return strings.Join(repos[i:], ",") +} + +func distributionSourceLabelKey(source string) string { + return labels.LabelDistributionSource + "." + source +} + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. +func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go new file mode 100644 index 00000000..6739e790 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/httpreadseeker.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "bytes" + "fmt" + "io" + + "github.com/containerd/errdefs" + "github.com/containerd/log" +) + +const maxRetry = 3 + +type httpReadSeeker struct { + size int64 + offset int64 + rc io.ReadCloser + open func(offset int64) (io.ReadCloser, error) + closed bool + + errsWithNoProgress int +} + +func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { + return &httpReadSeeker{ + size: size, + open: open, + }, nil +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + if n > 0 || err == nil { + hrs.errsWithNoProgress = 0 + } + if err == io.ErrUnexpectedEOF { + // connection closed unexpectedly. try reconnecting. + if n == 0 { + hrs.errsWithNoProgress++ + if hrs.errsWithNoProgress > maxRetry { + return // too many retries for this offset with no progress + } + } + if hrs.rc != nil { + if clsErr := hrs.rc.Close(); clsErr != nil { + log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser") + } + hrs.rc = nil + } + if _, err2 := hrs.reader(); err2 == nil { + return n, nil + } + } else if err == io.EOF { + // The CRI's imagePullProgressTimeout relies on responseBody.Close to + // update the process monitor's status. If the err is io.EOF, close + // the connection since there is no more available data. + if hrs.rc != nil { + if clsErr := hrs.rc.Close(); clsErr != nil { + log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF") + } + hrs.rc = nil + } + } + return +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.closed { + return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable) + } + + abs := hrs.offset + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs += offset + case io.SeekEnd: + if hrs.size == -1 { + return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable) + } + abs = hrs.size + offset + default: + return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument) + } + + if abs < 0 { + return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument) + } + + if abs != hrs.offset { + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser") + } + + hrs.rc = nil + } + + hrs.offset = abs + } + + return hrs.offset, nil +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.rc != nil { + return hrs.rc, nil + } + + if hrs.size == -1 || hrs.offset < hrs.size { + // only try to reopen the body request if we are seeking to a value + // less than the actual size. + if hrs.open == nil { + return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented) + } + + rc, err := hrs.open(hrs.offset) + if err != nil { + return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err) + } + + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser") + } + } + hrs.rc = rc + } else { + // There is an edge case here where offset == size of the content. 
If + // we seek, we will probably get an error for content that cannot be + // sought (?). In that case, we should err on committing the content, + // as the length is already satisfied but we just return the empty + // reader instead. + + hrs.rc = io.NopCloser(bytes.NewReader([]byte{})) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go new file mode 100644 index 00000000..f994fff5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go @@ -0,0 +1,558 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" + remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors" + "github.com/containerd/errdefs" + "github.com/containerd/log" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type dockerPusher struct { + *dockerBase + object string + + // TODO: namespace tracker + tracker StatusTracker +} + +// Writer implements Ingester API of content store. This allows the client +// to receive ErrUnavailable when there is already an on-going upload. +// Note that the tracker MUST implement StatusTrackLocker interface to avoid +// race condition on StatusTracker. +func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + if wOpts.Ref == "" { + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) + } + return p.push(ctx, wOpts.Desc, wOpts.Ref, true) +} + +func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false) +} + +func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) { + if l, ok := p.tracker.(StatusTrackLocker); ok { + l.Lock(ref) + defer l.Unlock(ref) + } + ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true) + if err != nil { + return nil, err + } + status, err := p.tracker.GetStatus(ref) + if err == nil { + if status.Committed && status.Offset == status.Total { + return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists) + } + if unavailableOnFail && status.ErrClosed == nil { + // Another push of this ref is happening elsewhere. The rest of function + // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there + // is no actively-tracked ref already). 
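The open(offset) callback that newHTTPReadSeeker above is built around typically reopens the same URL with a Range header so a dropped connection resumes mid-blob; the real fetcher additionally validates the echoed Content-Range, since a server may ignore Range and answer 200. A sketch under those assumptions, with an illustrative server:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"time"
)

// openAt returns an opener in the shape newHTTPReadSeeker expects:
// reopen the URL at a byte offset using an HTTP Range request.
func openAt(url string) func(offset int64) (io.ReadCloser, error) {
	return func(offset int64) (io.ReadCloser, error) {
		req, err := http.NewRequest(http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		if offset > 0 {
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
			resp.Body.Close()
			return nil, fmt.Errorf("unexpected status: %s", resp.Status)
		}
		return resp.Body, nil
	}
}

func main() {
	data := []byte("0123456789abcdef")
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.ServeContent(w, r, "blob", time.Time{}, bytes.NewReader(data)) // honors Range
	}))
	defer srv.Close()

	rc, err := openAt(srv.URL)(10) // resume from byte 10
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	rest, _ := io.ReadAll(rc)
	fmt.Printf("resumed: %q\n", rest) // "abcdef"
}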
+ return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable) + } + // TODO: Handle incomplete status + } else if !errdefs.IsNotFound(err) { + return nil, fmt.Errorf("failed to get status: %w", err) + } + + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound) + } + + var ( + isManifest bool + existCheck []string + host = hosts[0] + ) + + if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) { + isManifest = true + existCheck = getManifestPath(p.object, desc.Digest) + } else { + existCheck = []string{"blobs", desc.Digest.String()} + } + + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) + + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if !errors.Is(err, ErrInvalidAuthorization) { + return nil, err + } + log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") + } else { + if resp.StatusCode == http.StatusOK { + var exists bool + if isManifest && existCheck[1] != desc.Digest.String() { + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if dgstHeader == desc.Digest { + exists = true + } + } else { + exists = true + } + + if exists { + p.tracker.SetStatus(ref, Status{ + Committed: true, + PushStatus: PushStatus{ + Exists: true, + }, + Status: content.Status{ + Ref: ref, + // TODO: Set updated time? + }, + }) + resp.Body.Close() + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) + } + } else if resp.StatusCode != http.StatusNotFound { + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + resp.Body.Close() + return nil, err + } + resp.Body.Close() + } + + if isManifest { + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) + } else { + // Start upload request + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + mountedFrom := "" + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. 
+ resp, err = preq.doWithRetries(pctx, nil) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + case http.StatusCreated: + mountedFrom = path.Join(p.refspec.Hostname(), fromRepo) + } + } + + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err) + } + return nil, err + } + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Committed: true, + PushStatus: PushStatus{ + MountedFrom: mountedFrom, + }, + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Offset: desc.Size, + }, + }) + return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err + } + + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) + // Support paths without host in location + if strings.HasPrefix(location, "/") { + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, fmt.Errorf("unable to parse location %v: %w", location, err) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + + // Check if different than what was requested, accounting for fallback in the transport layer + requested := resp.Request.URL + if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme { + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed") + } + } + } + q := lurl.Query() + q.Add("digest", desc.Digest.String()) + + req = p.request(lhost, http.MethodPut) + req.header.Set("Content-Type", "application/octet-stream") + req.path = lurl.Path + "?" 
+ q.Encode() + } + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Expected: desc.Digest, + StartedAt: time.Now(), + }, + }) + + // TODO: Support chunked upload + + pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest) + + req.body = func() (io.ReadCloser, error) { + pr, pw := io.Pipe() + pushw.setPipe(pw) + return pr, nil + } + req.size = desc.Size + + go func() { + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + pushw.setError(err) + return + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + pushw.setError(err) + return + } + pushw.setResponse(resp) + }() + + return pushw, nil +} + +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + +type pushWriter struct { + base *dockerBase + ref string + + pipe *io.PipeWriter + + done chan struct{} + closeOnce sync.Once + + pipeC chan *io.PipeWriter + respC chan *http.Response + errC chan error + + isManifest bool + + expected digest.Digest + tracker StatusTracker +} + +func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter { + // Initialize and create response + return &pushWriter{ + base: db, + ref: ref, + expected: expected, + tracker: tracker, + pipeC: make(chan *io.PipeWriter, 1), + respC: make(chan *http.Response, 1), + errC: make(chan error, 1), + done: make(chan struct{}), + isManifest: isManifest, + } +} + +func (pw *pushWriter) setPipe(p *io.PipeWriter) { + select { + case <-pw.done: + case pw.pipeC <- p: + } +} + +func (pw *pushWriter) setError(err error) { + select { + case <-pw.done: + case pw.errC <- err: + } +} + +func (pw *pushWriter) setResponse(resp *http.Response) { + select { + case <-pw.done: + case pw.respC <- resp: + } +} + +func (pw *pushWriter) replacePipe(p *io.PipeWriter) error { + if pw.pipe == nil { + pw.pipe = p + return nil + } + + pw.pipe.CloseWithError(content.ErrReset) + pw.pipe = p + + // If content has already been written, the bytes + // cannot be written again and the caller must reset + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return err + } + status.Offset = 0 + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return content.ErrReset +} + +func (pw *pushWriter) Write(p []byte) (n int, err error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return n, err + } + + if pw.pipe == nil { + select { + case <-pw.done: + return 0, io.ErrClosedPipe + case p := <-pw.pipeC: + pw.replacePipe(p) + } + } else { + select { + case <-pw.done: + return 0, io.ErrClosedPipe + case p := <-pw.pipeC: + return 0, pw.replacePipe(p) + default: + } + } + + n, err = pw.pipe.Write(p) + if errors.Is(err, io.ErrClosedPipe) { + // if the pipe is closed, we might have the original error on the error + // channel - so we should try and get it + select { + case <-pw.done: + case err = <-pw.errC: + pw.Close() + case p := <-pw.pipeC: + return 0, 
pw.replacePipe(p) + case resp := <-pw.respC: + pw.setResponse(resp) + } + } + status.Offset += int64(n) + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return +} + +func (pw *pushWriter) Close() error { + // Ensure pipeC is closed but handle `Close()` being + // called multiple times without panicking + pw.closeOnce.Do(func() { + close(pw.done) + }) + if pw.pipe != nil { + status, err := pw.tracker.GetStatus(pw.ref) + if err == nil && !status.Committed { + // Closing an incomplete writer. Record this as an error so that following write can retry it. + status.ErrClosed = errors.New("closed incomplete writer") + pw.tracker.SetStatus(pw.ref, status) + } + return pw.pipe.Close() + } + return nil +} + +func (pw *pushWriter) Status() (content.Status, error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return content.Status{}, err + } + return status.Status, nil + +} + +func (pw *pushWriter) Digest() digest.Digest { + // TODO: Get rid of this function? + return pw.expected +} + +func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Check whether read has already thrown an error + if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { + return fmt.Errorf("pipe error before commit: %w", err) + } + + if err := pw.pipe.Close(); err != nil { + return err + } + // TODO: timeout waiting for response + var resp *http.Response + select { + case <-pw.done: + return io.ErrClosedPipe + case err := <-pw.errC: + pw.Close() + return err + case resp = <-pw.respC: + defer resp.Body.Close() + case p := <-pw.pipeC: + // check whether the pipe has changed in the commit, because sometimes Write + // can complete successfully, but the pipe may have changed. In that case, the + // content needs to be reset. + return pw.replacePipe(p) + } + + // 201 is specified return status, some registries return + // 200, 202 or 204. + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: + default: + return remoteserrors.NewUnexpectedStatusErr(resp) + } + + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return fmt.Errorf("failed to get status: %w", err) + } + + if size > 0 && size != status.Offset { + return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size) + } + + if expected == "" { + expected = status.Expected + } + + actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return fmt.Errorf("invalid content digest in response: %w", err) + } + + if actual != expected { + return fmt.Errorf("got digest %s, expected %s", actual, expected) + } + + status.Committed = true + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + + return nil +} + +func (pw *pushWriter) Truncate(size int64) error { + // TODO: if blob close request and start new request at offset + // TODO: always error on manifest + return errors.New("cannot truncate remote upload") +} + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" 
+ if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go new file mode 100644 index 00000000..98cafcd0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go @@ -0,0 +1,244 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "errors" + "net" + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +// Has checks whether the capabilities list has the provide capability +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities + Header http.Header +} + +func (h RegistryHost) isProxy(refhost string) bool { + if refhost != h.Host { + if refhost != "docker.io" || h.Host != "registry-1.docker.io" { + return true + } + } + return false +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. 
When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. +// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +// +// Note: this does not handle matching of ip addresses in octal, +// decimal or hex form. +func MatchLocalhost(host string) (bool, error) { + switch { + case host == "::1": + return true, nil + case host == "[::1]": + return true, nil + } + h, p, err := net.SplitHostPort(host) + + // addrError helps distinguish between errors of form + // "no colon in address" and "too many colons in address". + // The former is fine as the host string need not have a + // port. Latter needs to be handled. 
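A minimal sketch wiring the helpers above: ConfigureDefaultRegistries with WithPlainHTTP(MatchLocalhost) yields plain HTTP for localhost registries and HTTPS (plus the docker.io to registry-1.docker.io rewrite) everywhere else. These are the APIs defined in this file; the registry names are examples:

package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/core/remotes/docker"
)

func main() {
	hosts := docker.ConfigureDefaultRegistries(
		docker.WithPlainHTTP(docker.MatchLocalhost),
	)

	for _, name := range []string{"docker.io", "localhost:5000"} {
		hs, err := hosts(name)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s://%s%s\n", name, hs[0].Scheme, hs[0].Host, hs[0].Path)
	}
}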
+ addrError := &net.AddrError{ + Err: "missing port in address", + Addr: host, + } + if err != nil { + if err.Error() != addrError.Error() { + return false, err + } + // host string without any port specified + h = host + } else if len(p) == 0 { + return false, errors.New("invalid host name format") + } + + // use ipv4 dotted decimal for further checking + if h == "localhost" { + h = "127.0.0.1" + } + ip := net.ParseIP(h) + + return ip.IsLoopback(), nil +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go new file mode 100644 index 00000000..dce391c5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go @@ -0,0 +1,807 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path" + "strings" + "sync" + + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" + "github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors" + "github.com/containerd/containerd/v2/pkg/reference" + "github.com/containerd/containerd/v2/pkg/tracing" + "github.com/containerd/containerd/v2/version" +) + +var ( + // ErrInvalidAuthorization is used when credentials are passed to a server but + // those credentials are rejected. + ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 +) + +// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. +// An Authorizer is responsible for caching tokens or credentials used by +// requests. +type Authorizer interface { + // Authorize sets the appropriate `Authorization` header on the given + // request. + // + // If no authorization is found for the request, the request remains + // unmodified. 
It may also add an `Authorization` header as
+	//  "bearer <some bearer token>"
+	//  "basic <base64 encoded credentials>"
+	//
+	// It may return remotes/errors.ErrUnexpectedStatus, which for example,
+	// can be used by the caller to find out the status code returned by the registry.
+	Authorize(context.Context, *http.Request) error
+
+	// AddResponses adds a 401 response for the authorizer to consider when
+	// authorizing requests. The last response should be unauthorized and
+	// the previous requests are used to consider redirects and retries
+	// that may have led to the 401.
+	//
+	// If response is not handled, returns `ErrNotImplemented`
+	AddResponses(context.Context, []*http.Response) error
+}
+
+// ResolverOptions are used to configure a new Docker registry resolver
+type ResolverOptions struct {
+	// Hosts returns registry host configurations for a namespace.
+	Hosts RegistryHosts
+
+	// Headers are the HTTP request header fields sent by the resolver
+	Headers http.Header
+
+	// Tracker is used to track uploads to the registry. This is used
+	// since the registry does not have upload tracking and the existing
+	// mechanism for getting blob upload status is expensive.
+	Tracker StatusTracker
+
+	// Authorizer is used to authorize registry requests
+	//
+	// Deprecated: use Hosts.
+	Authorizer Authorizer
+
+	// Credentials provides username and secret given a host.
+	// If username is empty but a secret is given, that secret
+	// is interpreted as a long lived token.
+	//
+	// Deprecated: use Hosts.
+	Credentials func(string) (string, string, error)
+
+	// Host provides the hostname given a namespace.
+	//
+	// Deprecated: use Hosts.
+	Host func(string) (string, error)
+
+	// PlainHTTP specifies to use plain http and not https
+	//
+	// Deprecated: use Hosts.
+	PlainHTTP bool
+
+	// Client is the http client to use when making registry requests
+	//
+	// Deprecated: use Hosts.
+	Client *http.Client
+}
+
+// DefaultHost is the default host function.
+func DefaultHost(ns string) (string, error) {
+	if ns == "docker.io" {
+		return "registry-1.docker.io", nil
+	}
+	return ns, nil
+}
+
+type dockerResolver struct {
+	hosts         RegistryHosts
+	header        http.Header
+	resolveHeader http.Header
+	tracker       StatusTracker
+}
+
+// NewResolver returns a new resolver to a Docker registry
+func NewResolver(options ResolverOptions) remotes.Resolver {
+	if options.Tracker == nil {
+		options.Tracker = NewInMemoryTracker()
+	}
+
+	if options.Headers == nil {
+		options.Headers = make(http.Header)
+	} else {
+		// make a copy of the headers to avoid race due to concurrent map write
+		options.Headers = options.Headers.Clone()
+	}
+	if _, ok := options.Headers["User-Agent"]; !ok {
+		options.Headers.Set("User-Agent", "containerd/"+version.Version)
+	}
+
+	resolveHeader := http.Header{}
+	if _, ok := options.Headers["Accept"]; !ok {
+		// set headers for all the types we support for resolution.
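+		//
+		// Editorial note (not upstream): absent a caller-supplied Accept header,
+		// resolve requests advertise the Docker schema2 manifest and manifest
+		// list, the OCI manifest and index, and "*/*", joined with ", ".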
+ resolveHeader.Set("Accept", strings.Join([]string{ + images.MediaTypeDockerSchema2Manifest, + images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, "*/*", + }, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") + } + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) + } + return &dockerResolver{ + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, + } +} + +func getManifestMediaType(resp *http.Response) string { + // Strip encoding data (manifests should always be ascii JSON) + contentType := resp.Header.Get("Content-Type") + if sp := strings.IndexByte(contentType, ';'); sp != -1 { + contentType = contentType[0:sp] + } + + // As of Apr 30 2019 the registry.access.redhat.com registry does not specify + // the content type of any data but uses schema1 manifests. + if contentType == "text/plain" { + contentType = images.MediaTypeDockerSchema1Manifest + } + return contentType +} + +type countingReader struct { + reader io.Reader + bytesRead int64 +} + +func (r *countingReader) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + r.bytesRead += int64(n) + return n, err +} + +var _ remotes.Resolver = &dockerResolver{} + +func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return "", ocispec.Descriptor{}, err + } + refspec := base.refspec + if refspec.Object == "" { + return "", ocispec.Descriptor{}, reference.ErrObjectRequired + } + + var ( + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull + ) + + if dgst != "" { + if err := dgst.Validate(); err != nil { + // need to fail here, since we can't actually resolve the invalid + // digest. + return "", ocispec.Descriptor{}, err + } + + // turns out, we have a valid digest, make a url. + paths = append(paths, []string{"manifests", dgst.String()}) + + // fallback to blobs on not found. + paths = append(paths, []string{"blobs", dgst.String()}) + } else { + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound) + } + + ctx, err = ContextWithRepositoryScope(ctx, refspec, false) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + var ( + // firstErr is the most relevant error encountered during resolution. + // We use this to determine the error to return, making sure that the + // error created furthest through the resolution process is returned. 
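+		//
+		// Editorial note (not upstream): the priorities assigned below rank
+		// errors from least to most advanced in the resolution flow:
+		// 1 = transport/auth failure, 2 = 404 from the registry,
+		// 3 = another 4xx/5xx status, 4 = manifest larger than MaxManifestSize.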
+ firstErr error + firstErrPriority int + ) + + nextHostOrFail := func(i int) string { + if i < len(hosts)-1 { + return "trying next host" + } + return "fetch failed" + } + + for _, u := range paths { + for i, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) + + req := base.request(host, http.MethodHead, u...) + if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err) + } + if firstErrPriority < 1 { + firstErr = err + firstErrPriority = 1 + } + log.G(ctx).WithError(err).Info(nextHostOrFail(i)) + continue // try another host + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + if firstErrPriority < 2 { + firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound) + firstErrPriority = 2 + } + log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status) + continue + } + if resp.StatusCode > 399 { + if firstErrPriority < 3 { + firstErr = remoteerrors.NewUnexpectedStatusErr(resp) + firstErrPriority = 3 + } + log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status) + continue // try another host + } + return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) 
+ } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + err = func() error { + defer resp.Body.Close() + if dgst != "" { + _, err = io.Copy(io.Discard, &bodyReader) + return err + } + + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return err + } + + dgst = digest.FromBytes(b) + return nil + } + + dgst, err = digest.FromReader(&bodyReader) + return err + }() + if err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if firstErrPriority < 4 { + firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound) + firstErrPriority = 4 + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil + } + } + + // If above loop terminates without return or error, then no registries + // were provided. + if firstErr == nil { + firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound) + } + + return "", ocispec.Descriptor{}, firstErr +} + +func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerFetcher{ + dockerBase: base, + }, nil +} + +func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerPusher{ + dockerBase: base, + object: base.refspec.Object, + tracker: r.tracker, + }, nil +} + +func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + return r.base(refspec) +} + +type dockerBase struct { + refspec reference.Spec + repository string + hosts []RegistryHost + header http.Header +} + +func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { + host := refspec.Hostname() + hosts, err := r.hosts(host) + if err != nil { + return nil, err + } + return &dockerBase{ + refspec: refspec, + repository: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, + }, nil +} + +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := r.header.Clone() + if header == nil { + header = http.Header{} + } + + for key, value := range host.Header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.repository}, ps...) + p := path.Join(parts...) 
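+	// Editorial example (not upstream): with host.Path "/v2", repository
+	// "library/busybox" and ps ["manifests", "latest"], p becomes
+	// "/v2/library/busybox/manifests/latest".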
+ // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { + // Check if has header for host + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { + return err + } + } + + return nil +} + +func (r *request) addNamespace(ns string) (err error) { + if !r.host.isProxy(ns) { + return nil + } + var q url.Values + // Parse query + if i := strings.IndexByte(r.path, '?'); i > 0 { + r.path = r.path[:i+1] + q, err = url.ParseQuery(r.path[i+1:]) + if err != nil { + return + } + } else { + r.path = r.path + "?" + q = url.Values{} + } + q.Add("ns", ns) + + r.path = r.path + q.Encode() + + return +} + +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequestWithContext(ctx, r.method, u, nil) + if err != nil { + return nil, err + } + if r.header == nil { + req.Header = http.Header{} + } else { + req.Header = r.header.Clone() // headers need to be copied to avoid concurrent map access + } + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") + if err := r.authorize(ctx, req); err != nil { + return nil, fmt.Errorf("failed to authorize: %w", err) + } + + client := &http.Client{} + if r.host.Client != nil { + *client = *r.host.Client + } + if client.CheckRedirect == nil { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + if err := r.authorize(ctx, req); err != nil { + return fmt.Errorf("failed to authorize redirect: %w", err) + } + return nil + } + } + + tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest")) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to do request: %w", err) + } + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") + return resp, nil +} + +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) + if err != nil { + return nil, err + } + + responses = append(responses, resp) + retry, err := r.retryRequest(ctx, responses) + if err != nil { + resp.Body.Close() + return nil, err + } + if retry { + resp.Body.Close() + return r.doWithRetries(ctx, responses) + } + return resp, err +} + +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { + if len(responses) > 5 { + return false, nil + } + last := responses[len(responses)-1] + switch last.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil + } else if !errdefs.IsNotImplemented(err) { + return false, err + } + } + + return false, nil + case 
http.StatusMethodNotAllowed: + // Support registries which have not properly implemented the HEAD method for + // manifests endpoint + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil + } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil + } + + // TODO: Handle 50x errors accounting for attempt history + return false, nil +} + +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) log.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return fields +} + +func responseFields(resp *http.Response) log.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return fields +} + +// IsLocalhost checks if the registry host is local. +func IsLocalhost(host string) bool { + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + + if host == "localhost" { + return true + } + + ip := net.ParseIP(host) + return ip.IsLoopback() +} + +// NewHTTPFallback returns http.RoundTripper which allows fallback from https to +// http for registry endpoints with configurations for both http and TLS, +// such as defaulted localhost endpoints. +func NewHTTPFallback(transport http.RoundTripper) http.RoundTripper { + return &httpFallback{ + super: transport, + } +} + +type httpFallback struct { + super http.RoundTripper + host string + mu sync.Mutex +} + +func (f *httpFallback) RoundTrip(r *http.Request) (*http.Response, error) { + f.mu.Lock() + fallback := f.host == r.URL.Host + f.mu.Unlock() + + // only fall back if the same host had previously fell back + if !fallback { + resp, err := f.super.RoundTrip(r) + if !isTLSError(err) && !isPortError(err, r.URL.Host) { + return resp, err + } + } + + plainHTTPUrl := *r.URL + plainHTTPUrl.Scheme = "http" + + plainHTTPRequest := *r + plainHTTPRequest.URL = &plainHTTPUrl + + if !fallback { + f.mu.Lock() + if f.host != r.URL.Host { + f.host = r.URL.Host + } + f.mu.Unlock() + + // update body on the second attempt + if r.Body != nil && r.GetBody != nil { + body, err := r.GetBody() + if err != nil { + return nil, err + } + plainHTTPRequest.Body = body + } + } + + return f.super.RoundTrip(&plainHTTPRequest) +} + +func isTLSError(err error) bool { + if err == nil { + return false + } + var tlsErr tls.RecordHeaderError + if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" { + return true + } + if strings.Contains(err.Error(), "TLS handshake timeout") { + return true + } + + return false +} + +func isPortError(err error, host string) bool { + if isConnError(err) || os.IsTimeout(err) { + if _, port, _ := net.SplitHostPort(host); port != "" { + // Port is specified, will not retry on different port with scheme change + return false + } + return true + } + + return false +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go new file 
mode 100644 index 00000000..4ef0e006 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_unix.go @@ -0,0 +1,28 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "errors" + "syscall" +) + +func isConnError(err error) bool { + return errors.Is(err, syscall.ECONNREFUSED) +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go new file mode 100644 index 00000000..9c98df04 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "errors" + "syscall" + + "golang.org/x/sys/windows" +) + +func isConnError(err error) bool { + return errors.Is(err, syscall.ECONNREFUSED) || errors.Is(err, windows.WSAECONNREFUSED) +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go new file mode 100644 index 00000000..e724e4e5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go @@ -0,0 +1,626 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1. +// +// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. 
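+//
+// Editorial sketch (not part of the vendored file): schema 1 pulls must be
+// opted into via the deprecation environment variable checked by NewConverter
+// below; contentStore and fetcher are assumed to already exist.
+//
+//	os.Setenv(deprecation.EnvPullSchema1Image, "1") // parsed with strconv.ParseBool
+//	conv, err := schema1.NewConverter(contentStore, fetcher)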
+package schema1
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/v2/core/content"
+	"github.com/containerd/containerd/v2/core/images"
+	"github.com/containerd/containerd/v2/core/remotes"
+	"github.com/containerd/containerd/v2/pkg/archive/compression"
+	"github.com/containerd/containerd/v2/pkg/deprecation"
+	"github.com/containerd/containerd/v2/pkg/labels"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sync/errgroup"
+)
+
+const (
+	manifestSizeLimit            = 8e6 // 8MB
+	labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
+)
+
+type blobState struct {
+	diffID digest.Digest
+	empty  bool
+}
+
+// Converter converts schema1 manifests to schema2 on fetch
+type Converter struct {
+	contentStore content.Store
+	fetcher      remotes.Fetcher
+
+	pulledManifest *manifest
+
+	mu         sync.Mutex
+	blobMap    map[digest.Digest]blobState
+	layerBlobs map[digest.Digest]ocispec.Descriptor
+}
+
+var ErrDisabled = fmt.Errorf("Pulling Schema 1 images has been deprecated and disabled by default since containerd v2.0. "+
+	"As a workaround you may set an environment variable `%s=1`, but this will be completely removed in containerd v2.1.",
+	deprecation.EnvPullSchema1Image)
+
+// NewConverter returns a new converter
+func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) (*Converter, error) {
+	s := os.Getenv(deprecation.EnvPullSchema1Image)
+	if s == "" {
+		return nil, ErrDisabled
+	}
+	enable, err := strconv.ParseBool(s)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse `%s=%s`: %w", deprecation.EnvPullSchema1Image, s, err)
+	}
+	if !enable {
+		return nil, ErrDisabled
+	}
+	log.L.Warn(ErrDisabled)
+	return &Converter{
+		contentStore: contentStore,
+		fetcher:      fetcher,
+		blobMap:      map[digest.Digest]blobState{},
+		layerBlobs:   map[digest.Digest]ocispec.Descriptor{},
+	}, nil
+}
+
+// Handle fetching descriptors for a docker media type
+func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema1Manifest:
+		if err := c.fetchManifest(ctx, desc); err != nil {
+			return nil, err
+		}
+
+		m := c.pulledManifest
+		if len(m.FSLayers) != len(m.History) {
+			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
+		}
+		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
+
+		for i := range m.FSLayers {
+			if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
+				empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
+				if err != nil {
+					return nil, err
+				}
+
+				// Do not attempt to download a known empty blob
+				if !empty {
+					descs = append([]ocispec.Descriptor{
+						{
+							MediaType: images.MediaTypeDockerSchema2LayerGzip,
+							Digest:    c.pulledManifest.FSLayers[i].BlobSum,
+							Size:      -1,
+						},
+					}, descs...)
+ } + c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{ + empty: empty, + } + } + } + return descs, nil + case images.MediaTypeDockerSchema2LayerGzip: + if c.pulledManifest == nil { + return nil, errors.New("manifest required for schema 1 blob pull") + } + return nil, c.fetchBlob(ctx, desc) + default: + return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType) + } +} + +// ConvertOptions provides options on converting a docker schema1 manifest. +type ConvertOptions struct { + // ManifestMediaType specifies the media type of the manifest OCI descriptor. + ManifestMediaType string + + // ConfigMediaType specifies the media type of the manifest config OCI + // descriptor. + ConfigMediaType string +} + +// ConvertOpt allows configuring a convert operation. +type ConvertOpt func(context.Context, *ConvertOptions) error + +// UseDockerSchema2 is used to indicate that a schema1 manifest should be +// converted into the media types for a docker schema2 manifest. +func UseDockerSchema2() ConvertOpt { + return func(ctx context.Context, o *ConvertOptions) error { + o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest + o.ConfigMediaType = images.MediaTypeDockerSchema2Config + return nil + } +} + +// Convert a docker manifest to an OCI descriptor +func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { + co := ConvertOptions{ + ManifestMediaType: ocispec.MediaTypeImageManifest, + ConfigMediaType: ocispec.MediaTypeImageConfig, + } + for _, opt := range opts { + if err := opt(ctx, &co); err != nil { + return ocispec.Descriptor{}, err + } + } + + history, diffIDs, err := c.schema1ManifestHistory() + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err) + } + + var img ocispec.Image + if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err) + } + + img.History = history + img.RootFS = ocispec.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + + b, err := json.MarshalIndent(img, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) + } + + config := ocispec.Descriptor{ + MediaType: co.ConfigMediaType, + Digest: digest.Canonical.FromBytes(b), + Size: int64(len(b)), + } + + layers := make([]ocispec.Descriptor, len(diffIDs)) + for i, diffID := range diffIDs { + layers[i] = c.layerBlobs[diffID] + } + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: config, + Layers: layers, + } + + mb, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) + } + + desc := ocispec.Descriptor{ + MediaType: co.ManifestMediaType, + Digest: digest.Canonical.FromBytes(mb), + Size: int64(len(mb)), + } + + labels := map[string]string{} + labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() + for i, ch := range manifest.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err) + } + + ref = remotes.MakeRefKey(ctx, config) + if err := content.WriteBlob(ctx, c.contentStore, ref, 
bytes.NewReader(b), config); err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err) + } + + return desc, nil +} + +// ReadStripSignature reads in a schema1 manifest and returns a byte array +// with the "signatures" field stripped +func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { + b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB + if err != nil { + return nil, err + } + + return stripSignature(b) +} + +func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch schema 1") + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + + b, err := ReadStripSignature(rc) + rc.Close() + if err != nil { + return err + } + + var m manifest + if err := json.Unmarshal(b, &m); err != nil { + return err + } + if len(m.Manifests) != 0 || len(m.Layers) != 0 { + return errors.New("converter: expected schema1 document but found extra keys") + } + c.pulledManifest = &m + + return nil +} + +func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch blob") + + var ( + ref = remotes.MakeRefKey(ctx, desc) + calc = newBlobStateCalculator() + compressMethod = compression.Gzip + ) + + // size may be unknown, set to zero for content ingest + ingestDesc := desc + if ingestDesc.Size == -1 { + ingestDesc.Size = 0 + } + + cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + reuse, err := c.reuseLabelBlobState(ctx, desc) + if err != nil { + return err + } + + if reuse { + return nil + } + + ra, err := c.contentStore.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + r, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + if err != nil { + return err + } + } else { + defer cw.Close() + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + eg, _ := errgroup.WithContext(ctx) + pr, pw := io.Pipe() + + eg.Go(func() error { + r, err := compression.DecompressStream(pr) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + pr.CloseWithError(err) + return err + }) + + eg.Go(func() error { + defer pw.Close() + + return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) + }) + + if err := eg.Wait(); err != nil { + return err + } + } + + if desc.Size == -1 { + info, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return fmt.Errorf("failed to get blob info: %w", err) + } + desc.Size = info.Size + } + + if compressMethod == compression.Uncompressed { + log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") + desc.MediaType = images.MediaTypeDockerSchema2Layer + } + + state := calc.State() + + cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + labels.LabelUncompressed: state.diffID.String(), + labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), + }, + } + + if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + return fmt.Errorf("failed to update uncompressed label: %w", err) + } + + c.mu.Lock() + 
c.blobMap[desc.Digest] = state + c.layerBlobs[state.diffID] = desc + c.mu.Unlock() + + return nil +} + +func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { + cinfo, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return false, fmt.Errorf("failed to get blob info: %w", err) + } + desc.Size = cinfo.Size + + diffID, ok := cinfo.Labels[labels.LabelUncompressed] + if !ok { + return false, nil + } + + emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] + if !ok { + return false, nil + } + + isEmpty, err := strconv.ParseBool(emptyVal) + if err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) + return false, nil + } + + bState := blobState{empty: isEmpty} + + if bState.diffID, err = digest.Parse(diffID); err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID) + return false, nil + } + + // NOTE: there is no need to read header to get compression method + // because there are only two kinds of methods. + if bState.diffID == desc.Digest { + desc.MediaType = images.MediaTypeDockerSchema2Layer + } else { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + } + + c.mu.Lock() + c.blobMap[desc.Digest] = bState + c.layerBlobs[bState.diffID] = desc + c.mu.Unlock() + return true, nil +} + +func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { + if c.pulledManifest == nil { + return nil, nil, errors.New("missing schema 1 manifest for conversion") + } + m := *c.pulledManifest + + if len(m.History) == 0 { + return nil, nil, errors.New("no history") + } + + history := make([]ocispec.History, len(m.History)) + diffIDs := []digest.Digest{} + for i := range m.History { + var h v1History + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err) + } + + blobSum := m.FSLayers[i].BlobSum + + state := c.blobMap[blobSum] + + history[len(history)-i-1] = ocispec.History{ + Author: h.Author, + Comment: h.Comment, + Created: &h.Created, + CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), + EmptyLayer: state.empty, + } + + if !state.empty { + diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) + + } + } + + return history, diffIDs, nil +} + +type fsLayer struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type history struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type manifest struct { + FSLayers []fsLayer `json:"fsLayers"` + History []history `json:"history"` + Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest + Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index +} + +type v1History struct { + Author string `json:"author,omitempty"` + Created time.Time `json:"created"` + Comment string `json:"comment,omitempty"` + ThrowAway *bool `json:"throwaway,omitempty"` + Size *int `json:"Size,omitempty"` // used before ThrowAway field + ContainerConfig struct { + Cmd []string `json:"Cmd,omitempty"` + } `json:"container_config,omitempty"` +} + +// isEmptyLayer returns whether the v1 compatibility history describes an +// empty layer. A return value of true indicates the layer is empty, +// however false does not indicate non-empty. 
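+//
+// Editorial examples (not upstream), per the v1History fields above:
+//
+//	isEmptyLayer([]byte(`{"throwaway": true}`)) // true, nil
+//	isEmptyLayer([]byte(`{"Size": 0}`))         // true, nil
+//	isEmptyLayer([]byte(`{}`))                  // false, nil: cannot be determined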
+func isEmptyLayer(compatHistory []byte) (bool, error) { + var h v1History + if err := json.Unmarshal(compatHistory, &h); err != nil { + return false, err + } + + if h.ThrowAway != nil { + return *h.ThrowAway, nil + } + if h.Size != nil { + return *h.Size == 0, nil + } + + // If no `Size` or `throwaway` field is given, then + // it cannot be determined whether the layer is empty + // from the history, return false + return false, nil +} + +type signature struct { + Signatures []jsParsedSignature `json:"signatures"` +} + +type jsParsedSignature struct { + Protected string `json:"protected"` +} + +type protectedBlock struct { + Length int `json:"formatLength"` + Tail string `json:"formatTail"` +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func stripSignature(b []byte) ([]byte, error) { + var sig signature + if err := json.Unmarshal(b, &sig); err != nil { + return nil, err + } + if len(sig.Signatures) == 0 { + return nil, errors.New("no signatures") + } + pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) + if err != nil { + return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err) + } + + var protected protectedBlock + if err := json.Unmarshal(pb, &protected); err != nil { + return nil, err + } + + if protected.Length > len(b) { + return nil, errors.New("invalid protected length block") + } + + tail, err := joseBase64UrlDecode(protected.Tail) + if err != nil { + return nil, fmt.Errorf("invalid tail base 64 value: %w", err) + } + + return append(b[:protected.Length], tail...), nil +} + +type blobStateCalculator struct { + empty bool + digester digest.Digester +} + +func newBlobStateCalculator() *blobStateCalculator { + return &blobStateCalculator{ + empty: true, + digester: digest.Canonical.Digester(), + } +} + +func (c *blobStateCalculator) Write(p []byte) (int, error) { + if c.empty { + for _, b := range p { + if b != 0x00 { + c.empty = false + break + } + } + } + return c.digester.Hash().Write(p) +} + +func (c *blobStateCalculator) State() blobState { + return blobState{ + empty: c.empty, + diffID: c.digester.Digest(), + } +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go new file mode 100644 index 00000000..8135498b --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/containerd/containerd/v2/pkg/reference" +) + +// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" +// for "host/foo/bar:baz". +// When push is true, both pull and push are added to the scope. +func RepositoryScope(refspec reference.Spec, push bool) (string, error) { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return "", err + } + s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" + if push { + s += ",push" + } + return s, nil +} + +// tokenScopesKey is used for the key for context.WithValue(). +// value: []string (e.g. {"registry:foo/bar:pull"}) +type tokenScopesKey struct{} + +// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. +func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { + s, err := RepositoryScope(refspec, push) + if err != nil { + return nil, err + } + return WithScope(ctx, s), nil +} + +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// ContextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) +} + +// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func GetTokenScopes(ctx context.Context, common []string) []string { + scopes := []string{} + if x := ctx.Value(tokenScopesKey{}); x != nil { + scopes = append(scopes, x.([]string)...) + } + + scopes = append(scopes, common...) + sort.Strings(scopes) + + if len(scopes) == 0 { + return scopes + } + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. + if scopes[l] == scopes[idx] { + continue + } + + l++ + scopes[l] = scopes[idx] + } + return scopes[:l+1] +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go new file mode 100644 index 00000000..9835525a --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/docker/status.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "fmt" + "sync" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/errdefs" + "github.com/moby/locker" +) + +// Status of a content operation +type Status struct { + content.Status + + Committed bool + + // ErrClosed contains error encountered on close. + ErrClosed error + + // UploadUUID is used by the Docker registry to reference blob uploads + UploadUUID string + + // PushStatus contains status related to push. + PushStatus +} + +type PushStatus struct { + // MountedFrom is the source content was cross-repo mounted from (empty if no cross-repo mount was performed). + MountedFrom string + + // Exists indicates whether content already exists in the repository and wasn't uploaded. + Exists bool +} + +// StatusTracker to track status of operations +type StatusTracker interface { + GetStatus(string) (Status, error) + SetStatus(string, Status) +} + +// StatusTrackLocker to track status of operations with lock +type StatusTrackLocker interface { + StatusTracker + Lock(string) + Unlock(string) +} + +type memoryStatusTracker struct { + statuses map[string]Status + m sync.Mutex + locker *locker.Locker +} + +// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory +func NewInMemoryTracker() StatusTrackLocker { + return &memoryStatusTracker{ + statuses: map[string]Status{}, + locker: locker.New(), + } +} + +func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { + t.m.Lock() + defer t.m.Unlock() + status, ok := t.statuses[ref] + if !ok { + return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound) + } + return status, nil +} + +func (t *memoryStatusTracker) SetStatus(ref string, status Status) { + t.m.Lock() + t.statuses[ref] = status + t.m.Unlock() +} + +func (t *memoryStatusTracker) Lock(ref string) { + t.locker.Lock(ref) +} + +func (t *memoryStatusTracker) Unlock(ref string) { + t.locker.Unlock(ref) +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go new file mode 100644 index 00000000..f60ff0fc --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errors + +import ( + "fmt" + "io" + "net/http" +) + +var _ error = ErrUnexpectedStatus{} + +// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status +type ErrUnexpectedStatus struct { + Status string + StatusCode int + Body []byte + RequestURL, RequestMethod string +} + +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status) +} + +// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response +func NewUnexpectedStatusErr(resp *http.Response) error { + var b []byte + if resp.Body != nil { + b, _ = io.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + } + err := ErrUnexpectedStatus{ + Body: b, + Status: resp.Status, + StatusCode: resp.StatusCode, + RequestMethod: resp.Request.Method, + } + if resp.Request.URL != nil { + err.RequestURL = resp.Request.URL.String() + } + return err +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go b/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go new file mode 100644 index 00000000..a3e1ff98 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go @@ -0,0 +1,396 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + "sync" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/pkg/labels" + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/semaphore" +) + +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + +// MakeRefKey returns a unique reference for the descriptor. This reference can be +// used to lookup ongoing processes related to the descriptor. This function +// may look to the context to namespace the reference appropriately. 
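+//
+// Editorial sketch (not upstream) of the keys produced, digests shortened:
+//
+//	MakeRefKey(ctx, manifestDesc) // "manifest-sha256:9834876d..."
+//	MakeRefKey(ctx, layerDesc)    // "layer-sha256:4bc453b5..."
+//
+//	// A per-media-type prefix overrides the defaults:
+//	ctx = WithMediaTypeKeyPrefix(ctx, ocispec.MediaTypeImageManifest, "my-prefix")
+//	MakeRefKey(ctx, manifestDesc) // "my-prefix-sha256:9834876d..."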
+func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + key := desc.Digest.String() + if desc.Annotations != nil { + if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { + key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) + } + } + + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + key + } + } + + switch { + case images.IsManifestType(desc.MediaType): + return "manifest-" + key + case images.IsIndexType(desc.MediaType): + return "index-" + key + case images.IsLayerType(desc.MediaType): + return "layer-" + key + case images.IsKnownConfig(desc.MediaType): + return "config-" + key + case images.IsAttestationType(desc.MediaType): + return "attestation-" + key + default: + log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType) + return "unknown-" + key + } +} + +// FetchHandler returns a handler that will fetch all content into the ingester +// discovered in a call to Dispatch. Use with ChildrenHandler to do a full +// recursive fetch. +func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + if desc.MediaType == images.MediaTypeDockerSchema1Manifest { + return nil, fmt.Errorf("%v not supported", desc.MediaType) + } + err := Fetch(ctx, ingester, fetcher, desc) + if errdefs.IsAlreadyExists(err) { + return nil, nil + } + return nil, err + } +} + +// Fetch fetches the given digest into the provided ingester +func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch") + + cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + if err != nil { + return err + } + defer cw.Close() + + ws, err := cw.Status() + if err != nil { + return err + } + + if desc.Size == 0 { + // most likely a poorly configured registry/web front end which responded with no + // Content-Length header; unable (not to mention useless) to commit a 0-length entry + // into the content store. Error out here otherwise the error sent back is confusing + return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument) + } + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) + } + return err + } + + if desc.Size == int64(len(desc.Data)) { + return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest) + } + + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) +} + +// PushHandler returns a handler that will push all content from the provider +// using a writer from the pusher. 
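+//
+// Editorial sketch (not upstream): a push handler is normally driven through
+// images.Dispatch; pusher and store are assumed to already exist.
+//
+//	handler := PushHandler(pusher, store)
+//	err := images.Dispatch(ctx, handler, nil, desc) // nil limiter: no concurrency bound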
+func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + err := push(ctx, provider, pusher, desc) + return nil, err + } +} + +func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("push") + + var ( + cw content.Writer + err error + ) + if cs, ok := pusher.(content.Ingester); ok { + cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + } else { + cw, err = pusher.Push(ctx, desc) + } + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + return nil + } + defer cw.Close() + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + rd := io.NewSectionReader(ra, 0, desc.Size) + return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) +} + +// PushContent pushes content specified by the descriptor from the provider. +// +// Base handlers can be provided which will be called before any push specific +// handlers. +// +// If the passed in content.Provider is also a content.InfoProvider (such as +// content.Manager) then this will also annotate the distribution sources using +// labels prefixed with "containerd.io/distribution.source". +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { + + var m sync.Mutex + manifests := []ocispec.Descriptor{} + indexStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if images.IsManifestType(desc.MediaType) { + m.Lock() + manifests = append(manifests, desc) + m.Unlock() + return nil, images.ErrStopHandler + } else if images.IsIndexType(desc.MediaType) { + m.Lock() + indexStack = append(indexStack, desc) + m.Unlock() + return nil, images.ErrStopHandler + } + return nil, nil + }) + + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + var handler images.Handler + if m, ok := store.(content.InfoProvider); ok { + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m) + handler = images.Handlers(annotateHandler, filterHandler, pushHandler) + } else { + handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler) + } + + if wrapper != nil { + handler = wrapper(handler) + } + + if err := images.Dispatch(ctx, handler, limiter, desc); err != nil { + return err + } + + if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil { + return err + } + + // Iterate in reverse order as seen, parent always uploaded after child + for i := len(indexStack) - 1; i >= 0; i-- { + err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i]) + if err != nil { + // TODO(estesp): until we have a more complete method for index push, we need to report + // missing dependencies in an index/manifest list by sensing the "400 Bad Request" + // as a marker for this problem + if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { + return fmt.Errorf("manifest list/index references to blobs 
and/or manifests are missing in your target registry: %w", err) + } + return err + } + } + + return nil +} + +// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributeable". +// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed. +// +// This is based on the media type of the content: +// - application/vnd.oci.image.layer.nondistributable +// - application/vnd.docker.image.rootfs.foreign +func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if images.IsNonDistributable(desc.MediaType) { + log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob") + return nil, images.ErrSkipDesc + } + + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + if len(children) == 0 { + return nil, nil + } + + out := make([]ocispec.Descriptor, 0, len(children)) + for _, child := range children { + if !images.IsNonDistributable(child.MediaType) { + out = append(out, child) + } else { + log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob") + } + } + return out, nil + } +} + +// FilterManifestByPlatformHandler allows Handler to handle non-target +// platform's manifest and configuration data. +func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // no platform information + if desc.Platform == nil || m == nil { + return children, nil + } + + if images.IsManifestType(desc.MediaType) && !m.Match(*desc.Platform) { + var descs []ocispec.Descriptor + for _, child := range children { + if images.IsConfigType(child.MediaType) { + descs = append(descs, child) + } + } + return descs, nil + } + return children, nil + } +} + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. +func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.InfoProvider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // Distribution source is only used for config or blob but may be inherited from + // a manifest or manifest list + if !images.IsManifestType(desc.MediaType) && !images.IsIndexType(desc.MediaType) { + return children, nil + } + + parentSourceAnnotations := desc.Annotations + var parentLabels map[string]string + if pi, err := provider.Info(ctx, desc.Digest); err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + } else { + parentLabels = pi.Labels + } + + for i := range children { + child := children[i] + + info, err := provider.Info(ctx, child.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + } + copyDistributionSourceLabels(info.Labels, &child) + + // Annotate with parent labels for cross repo mount or fetch. + // Parent sources may apply to all children since most registries + // enforce that children exist before the manifests. 
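// A minimal usage sketch (not part of the vendored change): PushContent takes
// a wrapper so callers can decorate the traversal; wrapping with
// SkipNonDistributableBlobs keeps foreign layers (e.g. Windows base layers)
// from being uploaded. The pusher, store, and descriptor are assumed to come
// from surrounding code, and platforms.All is assumed from the
// github.com/containerd/platforms module this diff vendors.
package example

import (
	"context"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/core/images"
	"github.com/containerd/containerd/v2/core/remotes"
	"github.com/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func pushDistributable(ctx context.Context, pusher remotes.Pusher, desc ocispec.Descriptor, store content.Provider) error {
	wrapper := func(h images.Handler) images.Handler {
		// Drop non-distributable children before the push handlers see them.
		return remotes.SkipNonDistributableBlobs(h.Handle)
	}
	// nil limiter: no concurrency cap; platforms.All: push content for every platform.
	return remotes.PushContent(ctx, pusher, desc, store, nil, platforms.All, wrapper)
}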
+ copyDistributionSourceLabels(parentSourceAnnotations, &child) + copyDistributionSourceLabels(parentLabels, &child) + + children[i] = child + } + return children, nil + } +} + +func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) { + for k, v := range from { + if !strings.HasPrefix(k, labels.LabelDistributionSource+".") { + continue + } + + if to.Annotations == nil { + to.Annotations = make(map[string]string) + } else { + // Only propagate the parent label if the child doesn't already have it. + if _, has := to.Annotations[k]; has { + continue + } + } + to.Annotations[k] = v + } +} diff --git a/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go b/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go new file mode 100644 index 00000000..c39b9378 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go @@ -0,0 +1,111 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "context" + "io" + + "github.com/containerd/containerd/v2/core/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Resolver provides remotes based on a locator. +type Resolver interface { + // Resolve attempts to resolve the reference into a name and descriptor. + // + // The argument `ref` should be a scheme-less URI representing the remote. + // Structurally, it has a host and path. The "host" can be used to directly + // reference a specific host or be matched against a specific handler. + // + // The returned name should be used to identify the referenced entity. + // Depending on the remote namespace, this may be immutable or mutable. + // While the name may differ from ref, it should itself be a valid ref. + // + // If the resolution fails, an error will be returned. + Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) + + // Fetcher returns a new fetcher for the provided reference. + // All content fetched from the returned fetcher will be + // from the namespace referred to by ref. + Fetcher(ctx context.Context, ref string) (Fetcher, error) + + // Pusher returns a new pusher for the provided reference + // The returned Pusher should satisfy content.Ingester and concurrent attempts + // to push the same blob using the Ingester API should result in ErrUnavailable. + Pusher(ctx context.Context, ref string) (Pusher, error) +} + +// Fetcher fetches content. +// A fetcher implementation may implement the FetcherByDigest interface too. +type Fetcher interface { + // Fetch the resource identified by the descriptor. + Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) +} + +// FetcherByDigest fetches content by the digest. +type FetcherByDigest interface { + // FetchByDigest fetches the resource identified by the digest. + // + // FetcherByDigest usually returns an incomplete descriptor. 
+ // Typically, the media type is always set to "application/octet-stream", + // and the annotations are unset. + FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error) +} + +// Pusher pushes content +type Pusher interface { + // Push returns a content writer for the given resource identified + // by the descriptor. + Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) +} + +// FetcherFunc allows package users to implement a Fetcher with just a +// function. +type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch content +func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + return fn(ctx, desc) +} + +// PusherFunc allows package users to implement a Pusher with just a +// function. +type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) + +// Push content +func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return fn(ctx, desc) +} + +// FetchByDigestConfig provides configuration for fetching content by digest +type FetchByDigestConfig struct { + //Mediatype specifies mediatype header to append for fetch request + Mediatype string +} + +// FetchByDigestOpts allows callers to set options for fetch object +type FetchByDigestOpts func(context.Context, *FetchByDigestConfig) error + +// WithMediaType sets the media type header for fetch request +func WithMediaType(mediatype string) FetchByDigestOpts { + return func(ctx context.Context, cfg *FetchByDigestConfig) error { + cfg.Mediatype = mediatype + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go new file mode 100644 index 00000000..6b55666d --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go @@ -0,0 +1,130 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
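// A minimal usage sketch (not part of the vendored change): FetcherFunc and
// PusherFunc exist so tests can satisfy the interfaces with closures. Here is
// a hypothetical in-memory Fetcher keyed by digest.
package example

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"github.com/containerd/containerd/v2/core/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func mapFetcher(blobs map[string][]byte) remotes.Fetcher {
	return remotes.FetcherFunc(func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
		b, ok := blobs[desc.Digest.String()]
		if !ok {
			return nil, fmt.Errorf("unknown blob %s", desc.Digest)
		}
		return io.NopCloser(bytes.NewReader(b)), nil
	})
}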
+*/ + +package fsverity + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/containerd/containerd/v2/pkg/kernelversion" + "golang.org/x/sys/unix" +) + +type fsverityEnableArg struct { + version uint32 + hashAlgorithm uint32 + blockSize uint32 + saltSize uint32 + saltPtr uint64 + sigSize uint32 + reserved1 uint32 + sigPtr uint64 + reserved2 [11]uint64 +} + +const ( + defaultBlockSize int = 4096 + maxDigestSize uint16 = 64 +) + +func IsSupported(rootPath string) (bool, error) { + minKernelVersion := kernelversion.KernelVersion{Kernel: 5, Major: 4} + s, err := kernelversion.GreaterEqualThan(minKernelVersion) + if err != nil { + return s, err + } + + integrityDir, err := os.MkdirTemp(rootPath, ".fsverity-check-*") + if err != nil { + return false, err + } + defer os.RemoveAll(integrityDir) + + digestPath := filepath.Join(integrityDir, "supported") + digestFile, err := os.Create(digestPath) + if err != nil { + return false, err + } + + digestFile.Close() + + eerr := Enable(digestPath) + if eerr != nil { + return false, eerr + } + + return true, nil +} + +func IsEnabled(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + var attr int32 + + _, _, flagErr := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_GETFLAGS), uintptr(unsafe.Pointer(&attr))) + if flagErr != 0 { + return false, fmt.Errorf("error getting inode flags: %w", flagErr) + } + + if attr&unix.FS_VERITY_FL == unix.FS_VERITY_FL { + return true, nil + } + + return false, nil +} + +func Enable(path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + + var args = &fsverityEnableArg{} + args.version = 1 + args.hashAlgorithm = 1 + + // fsverity block size should be the minimum between the page size + // and the file system block size + // If neither value is retrieved successfully, set fsverity block size to the default value + blockSize := unix.Getpagesize() + + s := unix.Stat_t{} + serr := unix.Stat(path, &s) + if serr == nil && int(s.Blksize) < blockSize { + blockSize = int(s.Blksize) + } + + if blockSize <= 0 { + blockSize = defaultBlockSize + } + + args.blockSize = uint32(blockSize) + + _, _, errno := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(args))) + if errno != 0 { + return fmt.Errorf("enable fsverity failed: %w", errno) + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go new file mode 100644 index 00000000..f50789ad --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go @@ -0,0 +1,33 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
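// A minimal usage sketch (not part of the vendored change): the expected call
// order for the fsverity helpers is probe once per storage root, then enable
// and verify per file. Note this package is internal to containerd, so the
// import below only illustrates the call pattern; paths are placeholders.
package example

import (
	"fmt"

	"github.com/containerd/containerd/v2/internal/fsverity"
)

func sealFile(root, path string) error {
	ok, err := fsverity.IsSupported(root)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("fsverity not supported under %s", root)
	}
	// Enable makes the file immutable and kernel-verified from here on.
	if err := fsverity.Enable(path); err != nil {
		return err
	}
	enabled, err := fsverity.IsEnabled(path)
	if err != nil {
		return err
	}
	if !enabled {
		return fmt.Errorf("verity not active on %s", path)
	}
	return nil
}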
+*/ + +package fsverity + +import "fmt" + +func IsSupported(rootPath string) (bool, error) { + return false, fmt.Errorf("fsverity is only supported on Linux systems") +} + +func IsEnabled(path string) (bool, error) { + return false, fmt.Errorf("fsverity is only supported on Linux systems") +} + +func Enable(_ string) error { + return fmt.Errorf("fsverity is only supported on Linux systems") +} diff --git a/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go b/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go new file mode 100644 index 00000000..f4b657d7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/internal/randutil/randutil.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package randutil provides utilities for [cyrpto/rand]. +package randutil + +import ( + "crypto/rand" + "math" + "math/big" +) + +// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood. +func Int63n(n int64) int64 { + b, err := rand.Int(rand.Reader, big.NewInt(n)) + if err != nil { + panic(err) + } + return b.Int64() +} + +// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood. +func Int63() int64 { + return Int63n(math.MaxInt64) +} + +// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood. +func Intn(n int) int { + return int(Int63n(int64(n))) +} + +// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood. +func Int() int { + return int(Int63()) +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go new file mode 100644 index 00000000..cba5e477 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go @@ -0,0 +1,330 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "sync" + + "github.com/containerd/log" + "github.com/klauspost/compress/zstd" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Gzip is gzip compression algorithm. + Gzip + // Zstd is zstd compression algorithm. 
+ Zstd + // Unknown is used when a plugin handles the algorithm. + Unknown +) + +const ( + disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" + disableIgzipEnv = "CONTAINERD_DISABLE_IGZIP" +) + +var ( + initGzip sync.Once + gzipPath string +) + +var ( + bufioReader32KPool = &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, + } +) + +// DecompressReadCloser include the stream after decompress and the compress method detected. +type DecompressReadCloser interface { + io.ReadCloser + // GetCompression returns the compress method which is used before decompressing + GetCompression() Compression +} + +type readCloserWrapper struct { + io.Reader + compression Compression + closer func() error +} + +func (r *readCloserWrapper) Close() error { + if r.closer != nil { + return r.closer() + } + return nil +} + +func (r *readCloserWrapper) GetCompression() Compression { + return r.compression +} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (w *writeCloserWrapper) Close() error { + if w.closer != nil { + w.closer() + } + return nil +} + +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) +} + +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + gzipMagic = []byte{0x1F, 0x8B, 0x08} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details. +func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. + if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, fn := range map[Compression]matcher{ + Gzip: magicNumberMatcher(gzipMagic), + Zstd: zstdMatcher(), + } { + if fn(source) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { + buf := newBufferedReader(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
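// A minimal usage sketch (not part of the vendored change): DetectCompression
// only sniffs magic bytes, so a few header bytes are enough to classify a stream.
package example

import "github.com/containerd/containerd/v2/pkg/archive/compression"

func kind(header []byte) string {
	switch compression.DetectCompression(header) {
	case compression.Gzip:
		return "gzip"
	case compression.Zstd:
		return "zstd"
	default:
		return "uncompressed"
	}
}

// e.g. kind([]byte{0x1F, 0x8B, 0x08, 0x00}) == "gzip"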
+ // See Issue docker/docker#18170 + return nil, err + } + + switch compression := DetectCompression(bs); compression { + case Uncompressed: + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + gzReader, err := gzipDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + return &readCloserWrapper{ + Reader: zstdReader, + compression: compression, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil + + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + case Zstd: + return zstd.NewWriter(dest) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + case Zstd: + return "zst" + case Unknown: + return "unknown" + } + return "" +} + +func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + initGzip.Do(func() { + if gzipPath = detectCommand("igzip", disableIgzipEnv); gzipPath != "" { + log.L.Debug("using igzip for decompression") + return + } + if gzipPath = detectCommand("unpigz", disablePigzEnv); gzipPath != "" { + log.L.Debug("using unpigz for decompression") + } + }) + + if gzipPath == "" { + return gzip.NewReader(buf) + } + return cmdStream(exec.CommandContext(ctx, gzipPath, "-d", "-c"), buf) +} + +func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + cmd.Stdin = in + cmd.Stdout = writer + + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + if err := cmd.Start(); err != nil { + return nil, err + } + + go func() { + if err := cmd.Wait(); err != nil { + writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + writer.Close() + } + }() + + return reader, nil +} + +func detectCommand(path, disableEnvName string) string { + // Check if this command is disabled via the env variable + value := os.Getenv(disableEnvName) + if value != "" { + disable, err := strconv.ParseBool(value) + if err != nil { + log.L.WithError(err).Warnf("could not parse %s: %s", disableEnvName, value) + } + + if disable { + return "" + } + } + + path, err := exec.LookPath(path) + if err != nil { + log.L.WithError(err).Debugf("%s not found", path) + return "" + } + + return path +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go new file mode 100644 index 00000000..3516494a --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go @@ -0,0 +1,28 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. 
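// A minimal round-trip sketch (not part of the vendored change) through
// CompressStream/DecompressStream; the returned reader also reports which
// algorithm it detected via GetCompression.
package example

import (
	"bytes"
	"io"

	"github.com/containerd/containerd/v2/pkg/archive/compression"
)

func roundTrip(payload []byte) ([]byte, compression.Compression, error) {
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Zstd)
	if err != nil {
		return nil, compression.Unknown, err
	}
	if _, err := w.Write(payload); err != nil {
		return nil, compression.Unknown, err
	}
	if err := w.Close(); err != nil { // Close flushes the zstd frame
		return nil, compression.Unknown, err
	}

	r, err := compression.DecompressStream(&buf)
	if err != nil {
		return nil, compression.Unknown, err
	}
	defer r.Close()
	out, err := io.ReadAll(r)
	return out, r.GetCompression(), err
}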
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bytes" +) + +func FuzzDecompressStream(data []byte) int { + _, _ = DecompressStream(bytes.NewReader(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go b/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go new file mode 100644 index 00000000..603bf41f --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go @@ -0,0 +1,72 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package deprecation + +type Warning string + +const ( + // Prefix is a standard prefix for all Warnings, used for filtering plugin Exports + Prefix = "io.containerd.deprecation/" + // PullSchema1Image is a warning for the use of schema 1 images + PullSchema1Image Warning = Prefix + "pull-schema-1-image" + // GoPluginLibrary is a warning for the use of dynamic library Go plugins + GoPluginLibrary Warning = Prefix + "go-plugin-library" + // CRIRegistryMirrors is a warning for the use of the `mirrors` property + CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors" + // CRIRegistryAuths is a warning for the use of the `auths` property + CRIRegistryAuths Warning = Prefix + "cri-registry-auths" + // CRIRegistryConfigs is a warning for the use of the `configs` property + CRIRegistryConfigs Warning = Prefix + "cri-registry-configs" + // OTLPTracingConfig is a warning for the use of the `otlp` property + TracingOTLPConfig Warning = Prefix + "tracing-processor-config" + // TracingServiceConfig is a warning for the use of the `tracing` property + TracingServiceConfig Warning = Prefix + "tracing-service-config" +) + +const ( + EnvPrefix = "CONTAINERD_ENABLE_DEPRECATED_" + EnvPullSchema1Image = EnvPrefix + "PULL_SCHEMA_1_IMAGE" +) + +var messages = map[Warning]string{ + PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7, disabled in containerd v2.0, and will be removed in containerd v2.1. " + + `Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`, + GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes are deprecated since containerd v2.0 and removed in containerd v2.1.", + CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." 
+ + "Use `config_path` instead.", + CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.1." + + "Use `ImagePullSecrets` instead.", + CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." + + "Use `config_path` instead.", + + TracingOTLPConfig: "The `otlp` property of `[plugins.\"io.containerd.tracing.processor.v1\".otlp]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." + + "Use OTLP environment variables instead: https://opentelemetry.io/docs/specs/otel/protocol/exporter/", + TracingServiceConfig: "The `tracing` property of `[plugins.\"io.containerd.internal.v1\".tracing]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." + + "Use OTEL environment variables instead: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/", +} + +// Valid checks whether a given Warning is valid +func Valid(id Warning) bool { + _, ok := messages[id] + return ok +} + +// Message returns the human-readable message for a given Warning +func Message(id Warning) (string, bool) { + msg, ok := messages[id] + return msg, ok +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go new file mode 100644 index 00000000..5a9c559c --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go new file mode 100644 index 00000000..dcc569a4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/filter.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+package filters

+import (
+	"regexp"
+
+	"github.com/containerd/log"
+)
+
+// Filter matches specific resources based the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go
new file mode 100644
index 00000000..e86ed8eb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/parser.go
@@ -0,0 +1,290 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/containerd/errdefs"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><operator><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator := "==" | "!=" | "~="
+value := quoted | [^\s,]+
+quoted := <go string syntax>
+*/
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, fmt.Errorf("filters: %w", err)
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
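// A minimal usage sketch (not part of the vendored change): wiring the grammar
// above to the Adaptor interface. A selector string is parsed once, then
// matched against an object exposed through an AdapterFunc; the field names
// here are hypothetical.
package example

import "github.com/containerd/containerd/v2/pkg/filters"

func matchesRunningRedis(name, state string, labels map[string]string) (bool, error) {
	// "~=" compiles the value as a regexp; bare "labels.env" tests presence.
	filter, err := filters.Parse(`name~=redis,state==running,labels.env`)
	if err != nil {
		return false, err
	}
	adaptor := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		switch fieldpath[0] {
		case "name":
			return name, true
		case "state":
			return state, true
		case "labels":
			if len(fieldpath) == 2 {
				v, ok := labels[fieldpath[1]]
				return v, ok
			}
		}
		return "", false
	})
	return filter.Match(adaptor), nil
}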
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, "%s", p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, "%s", p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, "%s", p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return fmt.Errorf("parse error: %w", parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }) +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go new file mode 100644 index 00000000..5c800ef8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/quote.go @@ -0,0 +1,252 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "errors" + "unicode/utf8" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. +// It returns four values: +// +// 1. value, the decoded Unicode code point or byte value; +// 2. 
multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3. tail, the remainder of the string after the character; and +// 4. an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go b/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go new file mode 100644 index 00000000..6a485467 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go b/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go new file mode 100644 index 00000000..a8cec648 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + File copied and customized based on + https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go +*/ + +package kernelversion + +import ( + "bytes" + "fmt" + "sync" + + "golang.org/x/sys/unix" +) + +// KernelVersion holds information about the kernel. +type KernelVersion struct { + Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic") + Major uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic") +} + +// String implements fmt.Stringer for KernelVersion +func (k *KernelVersion) String() string { + if k.Kernel > 0 || k.Major > 0 { + return fmt.Sprintf("%d.%d", k.Kernel, k.Major) + } + return "" +} + +var ( + currentKernelVersion *KernelVersion + kernelVersionError error + once sync.Once +) + +// getKernelVersion gets the current kernel version. +func getKernelVersion() (*KernelVersion, error) { + once.Do(func() { + var uts unix.Utsname + if err := unix.Uname(&uts); err != nil { + return + } + // Remove the \x00 from the release for Atoi to parse correctly + currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) + }) + return currentKernelVersion, kernelVersionError +} + +// parseRelease parses a string and creates a KernelVersion based on it. 
+func parseRelease(release string) (*KernelVersion, error) { + var version = KernelVersion{} + + // We're only make sure we get the "kernel" and "major revision". Sometimes we have + // 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64. + _, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major) + if err != nil { + return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err) + } + return &version, nil +} + +// GreaterEqualThan checks if the host's kernel version is greater than, or +// equal to the given kernel version v. Only "kernel version" and "major revision" +// can be specified (e.g., "3.12") and will be taken into account, which means +// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12). +func GreaterEqualThan(minVersion KernelVersion) (bool, error) { + kv, err := getKernelVersion() + if err != nil { + return false, err + } + if kv.Kernel > minVersion.Kernel { + return true, nil + } + if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go b/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go new file mode 100644 index 00000000..0f9bab5c --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/labels/labels.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +// LabelUncompressed is added to compressed layer contents. +// The value is digest of the uncompressed content. +const LabelUncompressed = "containerd.io/uncompressed" + +// LabelSharedNamespace is added to a namespace to allow that namespaces +// contents to be shared. +const LabelSharedNamespace = "containerd.io/namespace.shareable" + +// LabelDistributionSource is added to content to indicate its origin. +// e.g., "containerd.io/distribution.source.docker.io=library/redis" +const LabelDistributionSource = "containerd.io/distribution.source" diff --git a/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go b/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go new file mode 100644 index 00000000..6f23cdd7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/labels/validate.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
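// A minimal usage sketch (not part of the vendored change): GreaterEqualThan
// compares only the kernel and major revision, so gating a feature on 5.4+
// looks like this; the fsverity code earlier in this diff does the same.
package example

import "github.com/containerd/containerd/v2/pkg/kernelversion"

func supportsFsverity() (bool, error) {
	return kernelversion.GreaterEqualThan(kernelversion.KernelVersion{Kernel: 5, Major: 4})
}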
+*/ + +package labels + +import ( + "fmt" + + "github.com/containerd/errdefs" +) + +const ( + maxSize = 4096 + // maximum length of key portion of error message if len of key + len of value > maxSize + keyMaxLen = 64 +) + +// Validate returns an error if the combined length of a label's key and value exceeds 4096 bytes +func Validate(k, v string) error { + total := len(k) + len(v) + if total > maxSize { + if len(k) > keyMaxLen { + k = k[:keyMaxLen] + } + return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go b/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go new file mode 100644 index 00000000..d983c4e1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go @@ -0,0 +1,156 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package reference + +import ( + "errors" + "net/url" + "path" + "regexp" + "strings" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrInvalid is returned when there is an invalid reference + ErrInvalid = errors.New("invalid reference") + // ErrObjectRequired is returned when the object is required + ErrObjectRequired = errors.New("object required") + // ErrHostnameRequired is returned when the hostname is required + ErrHostnameRequired = errors.New("hostname required") +) + +// Spec defines the main components of a reference specification. +// +// A reference specification is a schema-less URI parsed into common +// components. The two main components, locator and object, are required to be +// supported by remotes. It represents a superset of the naming defined in +// docker's reference schema. It aims to be compatible but not prescriptive. +// +// While the interpretation of the components, locator and object, is up to +// the remote, we define a few common parts, accessible via helper methods. +// +// The first is the hostname, which is part of the locator. This doesn't need +// to map to a physical resource, but it must parse as a hostname. We refer to +// this as the namespace. +// +// The other component made accessible by helper method is the digest. This is +// part of the object identifier, always prefixed with an '@'. If present, the +// remote may use the digest portion directly or resolve it against a prefix. +// If the object does not include the `@` symbol, the return value for `Digest` +// will be empty. +type Spec struct { + // Locator is the host and path portion of the specification. The host + // portion may refer to an actual host or just a namespace of related + // images. + // + // Typically, the locator may be used to resolve the remote to fetch specific + // resources. + Locator string + + // Object contains the identifier for the remote resource. Classically, + // this is a tag but can refer to anything in a remote.
By convention, any + // portion that may be a partial or whole digest will be preceded by an + // `@`. Anything preceding the `@` will be referred to as the "tag". + // + // In practice, we will see this broken down into the following formats: + // + // 1. <tag> + // 2. <tag>@<digest spec> + // 3. @<digest spec> + // + // We define the tag to be anything except '@' and ':'. <digest spec> may + // be a full valid digest or shortened version, possibly with elided + // algorithm. + Object string +} + +var splitRe = regexp.MustCompile(`[:@]`) + +// Parse parses the string into a structured ref. +func Parse(s string) (Spec, error) { + if strings.Contains(s, "://") { + return Spec{}, ErrInvalid + } + + u, err := url.Parse("dummy://" + s) + if err != nil { + return Spec{}, err + } + + if u.Scheme != "dummy" { + return Spec{}, ErrInvalid + } + + if u.Host == "" { + return Spec{}, ErrHostnameRequired + } + + var object string + + if idx := splitRe.FindStringIndex(u.Path); idx != nil { + // This allows us to retain the @ to signify digests or shortened digests in + // the object. + object = u.Path[idx[0]:] + if object[:1] == ":" { + object = object[1:] + } + u.Path = u.Path[:idx[0]] + } + + return Spec{ + Locator: path.Join(u.Host, u.Path), + Object: object, + }, nil +} + +// Hostname returns the hostname portion of the locator. +// +// Remotes are not required to directly access the resources at this host. This +// method is provided for convenience. +func (r Spec) Hostname() string { + i := strings.Index(r.Locator, "/") + + if i < 0 { + return r.Locator + } + return r.Locator[:i] +} + +// Digest returns the digest portion of the reference spec. This may be a +// partial or invalid digest, which may be used to look up a complete digest. +func (r Spec) Digest() digest.Digest { + i := strings.Index(r.Object, "@") + + if i < 0 { + return "" + } + return digest.Digest(r.Object[i+1:]) +} + +// String returns the normalized string for the ref. +func (r Spec) String() string { + if r.Object == "" { + return r.Locator + } + if r.Object[:1] == "@" { + return r.Locator + r.Object + } + + return r.Locator + ":" + r.Object +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go new file mode 100644 index 00000000..ab1278ef --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/helpers.go @@ -0,0 +1,85 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
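For illustration only (not part of the vendored diff): a small sketch of reference.Parse splitting a ref into its locator and object. The redis ref and the shortened digest are made-up example values.

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/pkg/reference"
)

func main() {
	spec, err := reference.Parse("docker.io/library/redis:7.2@sha256:0123abcd")
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Locator)    // docker.io/library/redis
	fmt.Println(spec.Object)     // 7.2@sha256:0123abcd
	fmt.Println(spec.Hostname()) // docker.io
	fmt.Println(spec.Digest())   // sha256:0123abcd (may be partial; not validated)
}
```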
+*/ + +package tracing + +import ( + "encoding/json" + "fmt" + + "go.opentelemetry.io/otel/attribute" +) + +func keyValue(k string, v any) attribute.KeyValue { + if v == nil { + return attribute.String(k, "") + } + + switch typed := v.(type) { + case bool: + return attribute.Bool(k, typed) + case []bool: + return attribute.BoolSlice(k, typed) + case int: + return attribute.Int(k, typed) + case []int: + return attribute.IntSlice(k, typed) + case int8: + return attribute.Int(k, int(typed)) + case []int8: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int16: + return attribute.Int(k, int(typed)) + case []int16: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int32: + return attribute.Int64(k, int64(typed)) + case []int32: + ls := make([]int64, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int64(i)) + } + return attribute.Int64Slice(k, ls) + case int64: + return attribute.Int64(k, typed) + case []int64: + return attribute.Int64Slice(k, typed) + case float64: + return attribute.Float64(k, typed) + case []float64: + return attribute.Float64Slice(k, typed) + case string: + return attribute.String(k, typed) + case []string: + return attribute.StringSlice(k, typed) + } + + if stringer, ok := v.(fmt.Stringer); ok { + return attribute.String(k, stringer.String()) + } + if b, err := json.Marshal(v); b != nil && err == nil { + return attribute.String(k, string(b)) + } + return attribute.String(k, fmt.Sprintf("%v", v)) +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go new file mode 100644 index 00000000..3af24a29 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/log.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "github.com/containerd/log" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// allLevels is the equivalent to [logrus.AllLevels]. +// +// [logrus.AllLevels]: https://github.com/sirupsen/logrus/blob/v1.9.3/logrus.go#L80-L89 +var allLevels = []log.Level{ + log.PanicLevel, + log.FatalLevel, + log.ErrorLevel, + log.WarnLevel, + log.InfoLevel, + log.DebugLevel, + log.TraceLevel, +} + +// NewLogrusHook creates a new logrus hook +func NewLogrusHook() *LogrusHook { + return &LogrusHook{} +} + +// LogrusHook is a [logrus.Hook] which adds logrus events to active spans. +// If the span is not recording or the span context is invalid, the hook +// is a no-op. +// +// [logrus.Hook]: https://github.com/sirupsen/logrus/blob/v1.9.3/hooks.go#L3-L11 +type LogrusHook struct{} + +// Levels returns the logrus levels that this hook is interested in. +func (h *LogrusHook) Levels() []log.Level { + return allLevels +} + +// Fire is called when a log event occurs. 
+func (h *LogrusHook) Fire(entry *log.Entry) error { + span := trace.SpanFromContext(entry.Context) + if span == nil { + return nil + } + + if !span.IsRecording() || !span.SpanContext().IsValid() { + return nil + } + + span.AddEvent( + entry.Message, + trace.WithAttributes(logrusDataToAttrs(entry.Data)...), + trace.WithAttributes(attribute.String("level", entry.Level.String())), + trace.WithTimestamp(entry.Time), + ) + + return nil +} + +func logrusDataToAttrs(data map[string]any) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0, len(data)) + for k, v := range data { + attrs = append(attrs, keyValue(k, v)) + } + return attrs +} diff --git a/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go b/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go new file mode 100644 index 00000000..48d760fe --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/pkg/tracing/tracing.go @@ -0,0 +1,132 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "context" + "net/http" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.21.0" + "go.opentelemetry.io/otel/trace" +) + +// StartConfig defines configuration for a new span object. +type StartConfig struct { + spanOpts []trace.SpanStartOption +} + +type SpanOpt func(config *StartConfig) + +// WithAttribute appends attributes to a newly created span. +func WithAttribute(k string, v interface{}) SpanOpt { + return func(config *StartConfig) { + config.spanOpts = append(config.spanOpts, + trace.WithAttributes(Attribute(k, v))) + } +} + +// UpdateHTTPClient updates the http client with the necessary otel transport +func UpdateHTTPClient(client *http.Client, name string) { + client.Transport = otelhttp.NewTransport( + client.Transport, + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + return name + }), + ) +} + +// StartSpan starts a child span in a context. +func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) { + config := StartConfig{} + for _, fn := range opts { + fn(&config) + } + tracer := otel.Tracer("") + if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { + tracer = parent.TracerProvider().Tracer("") + } + ctx, span := tracer.Start(ctx, opName, config.spanOpts...) + return ctx, &Span{otelSpan: span} +} + +// SpanFromContext returns the current Span from the context. +func SpanFromContext(ctx context.Context) *Span { + return &Span{ + otelSpan: trace.SpanFromContext(ctx), + } +} + +// Span is a wrapper around otel trace.Span. +// Span is the individual component of a trace. It represents a +// single named and timed operation of a workflow that is traced. +type Span struct { + otelSpan trace.Span +} + +// End completes the span.
+func (s *Span) End() { + s.otelSpan.End() +} + +// AddEvent adds an event with provided name and options. +func (s *Span) AddEvent(name string, attributes ...attribute.KeyValue) { + s.otelSpan.AddEvent(name, trace.WithAttributes(attributes...)) +} + +// RecordError will record err as an exception span event for this span +func (s *Span) RecordError(err error, options ...trace.EventOption) { + s.otelSpan.RecordError(err, options...) +} + +// SetStatus sets the status of the current span. +// If an error is encountered, it records the error and sets span status to Error. +func (s *Span) SetStatus(err error) { + if err != nil { + s.otelSpan.RecordError(err) + s.otelSpan.SetStatus(codes.Error, err.Error()) + } else { + s.otelSpan.SetStatus(codes.Ok, "") + } +} + +// SetAttributes sets kv as attributes of the span. +func (s *Span) SetAttributes(kv ...attribute.KeyValue) { + s.otelSpan.SetAttributes(kv...) +} + +const spanDelimiter = "." + +// Name sets the span name by joining a list of strings in dot separated format. +func Name(names ...string) string { + return strings.Join(names, spanDelimiter) +} + +// Attribute takes a key value pair and returns attribute.KeyValue type. +func Attribute(k string, v any) attribute.KeyValue { + return keyValue(k, v) +} + +// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry +// specification for a span. +func HTTPStatusCodeAttributes(code int) []attribute.KeyValue { + return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)} +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go new file mode 100644 index 00000000..39ec84b3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go @@ -0,0 +1,76 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "bufio" + "bytes" + "context" + _ "crypto/sha256" + "io" + "testing" + + "github.com/opencontainers/go-digest" + + "github.com/containerd/containerd/v2/core/content" +) + +func FuzzContentStoreWriter(data []byte) int { + t := &testing.T{} + ctx := context.Background() + ctx, _, cs, cleanup := contentStoreEnv(t) + defer cleanup() + + cw, err := cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + if err := cw.Close(); err != nil { + return 0 + } + + // reopen, so we can test things + cw, err = cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + + err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) + if err != nil { + return 0 + } + expected := digest.FromBytes(data) + + if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { + return 0 + } + return 1 +} + +func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { + nn, err := io.Copy(dst, src) + if err != nil { + return err + } + + if nn != size { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go new file mode 100644 index 00000000..80795add --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/locks.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "fmt" + "time" + + "github.com/containerd/errdefs" +) + +// Handles locking references + +type lock struct { + since time.Time +} + +func (s *store) tryLock(ref string) error { + s.locksMu.Lock() + defer s.locksMu.Unlock() + + if v, ok := s.locks[ref]; ok { + // Returning the duration may help developers distinguish dead locks (long duration) from + // lock contentions (short duration). + now := time.Now() + return fmt.Errorf( + "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since, + errdefs.ErrUnavailable, + ) + } + + s.locks[ref] = &lock{time.Now()} + return nil +} + +func (s *store) unlock(ref string) { + s.locksMu.Lock() + defer s.locksMu.Unlock() + + delete(s.locks, ref) +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go new file mode 100644 index 00000000..59198fd6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/readerat.go @@ -0,0 +1,72 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "fmt" + "io" + "os" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/errdefs" +) + +// readerat implements io.ReaderAt in a completely stateless manner by opening +// the referenced file for each call to ReadAt. +type sizeReaderAt struct { + size int64 + fp *os.File +} + +// OpenReader creates ReaderAt from a file +func OpenReader(p string) (content.ReaderAt, error) { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + +func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { + return ra.fp.ReadAt(p, offset) +} + +func (ra sizeReaderAt) Size() int64 { + return ra.size +} + +func (ra sizeReaderAt) Close() error { + return ra.fp.Close() +} + +func (ra sizeReaderAt) Reader() io.Reader { + return io.LimitReader(ra.fp, ra.size) +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go new file mode 100644 index 00000000..794c82c2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store.go @@ -0,0 +1,710 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/errdefs" + "github.com/containerd/log" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/internal/fsverity" + "github.com/containerd/containerd/v2/pkg/filters" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// LabelStore is used to store mutable labels for digests +type LabelStore interface { + // Get returns all the labels for the given digest + Get(digest.Digest) (map[string]string, error) + + // Set sets all the labels for a given digest + Set(digest.Digest, map[string]string) error + + // Update replaces the given labels for a digest, + // a key with an empty value removes a label. + Update(digest.Digest, map[string]string) (map[string]string, error) +} + +// Store is digest-keyed store for content. All data written into the store is +// stored under a verifiable digest. +// +// Store can generally support multi-reader, single-writer ingest of data, +// including resumable ingest. 
+type store struct { + root string + ls LabelStore + integritySupported bool + + locksMu sync.Mutex + locks map[string]*lock + ensureIngestRootOnce func() error +} + +// NewStore returns a local content store +func NewStore(root string) (content.Store, error) { + return NewLabeledStore(root, nil) +} + +// NewLabeledStore returns a new content store using the provided label store +// +// Note: content stores which are used underneath a metadata store may not +// require labels and should use `NewStore`. `NewLabeledStore` is primarily +// useful for tests or standalone implementations. +func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { + supported, _ := fsverity.IsSupported(root) + + s := &store{ + root: root, + ls: ls, + integritySupported: supported, + locks: map[string]*lock{}, + } + s.ensureIngestRootOnce = sync.OnceValue(s.ensureIngestRoot) + return s, nil +} + +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + p, err := s.blobPath(dgst) + if err != nil { + return content.Info{}, fmt.Errorf("calculating blob info path: %w", err) + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) + } + + return content.Info{}, err + } + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return content.Info{}, err + } + } + return s.info(dgst, fi, labels), nil +} + +func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { + return content.Info{ + Digest: dgst, + Size: fi.Size(), + CreatedAt: fi.ModTime(), + UpdatedAt: getATime(fi), + Labels: labels, + } +} + +// ReaderAt returns an io.ReaderAt for the blob. +func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + p, err := s.blobPath(desc.Digest) + if err != nil { + return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err) + } + + reader, err := OpenReader(p) + if err != nil { + return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err) + } + + return reader, nil +} + +// Delete removes a blob by its digest. +// +// While this is safe to do concurrently, safe exist-removal logic must hold +// some global lock on the store. 
+func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + bp, err := s.blobPath(dgst) + if err != nil { + return fmt.Errorf("calculating blob path for delete: %w", err) + } + + if err := os.RemoveAll(bp); err != nil { + if !os.IsNotExist(err) { + return err + } + + return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) + } + + return nil +} + +func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + if s.ls == nil { + return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition) + } + + p, err := s.blobPath(info.Digest) + if err != nil { + return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err) + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound) + } + + return content.Info{}, err + } + + var ( + all bool + labels map[string]string + ) + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if labels == nil { + labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + all = true + labels = info.Labels + default: + return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) + } + } + } else { + all = true + labels = info.Labels + } + + if all { + err = s.ls.Set(info.Digest, labels) + } else { + labels, err = s.ls.Update(info.Digest, labels) + } + if err != nil { + return content.Info{}, err + } + + info = s.info(info.Digest, fi, labels) + info.UpdatedAt = time.Now() + + if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { + log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) + } + + return info, nil +} + +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + root := filepath.Join(s.root, "blobs") + + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + + var alg digest.Algorithm + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if !fi.IsDir() && !alg.Available() { + return nil + } + + // TODO(stevvooe): There are few more cases with subdirs that should be + // handled in case the layout gets corrupted. This isn't strict enough + // and may spew bad data. + + if path == root { + return nil + } + if filepath.Dir(path) == root { + alg = digest.Algorithm(filepath.Base(path)) + + if !alg.Available() { + alg = "" + return filepath.SkipDir + } + + // descending into a hash directory + return nil + } + + dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path)) + if err := dgst.Validate(); err != nil { + // log error but don't report + log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") + // if we see this, it could mean some sort of corruption of the + // store or extra paths not expected previously. 
+ } + + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return err + } + } + + info := s.info(dgst, fi, labels) + if !filter.Match(content.AdaptInfo(info)) { + return nil + } + return fn(info) + }) +} + +func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { + return s.status(s.ingestRoot(ref)) +} + +func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + defer fp.Close() + + fis, err := fp.Readdirnames(-1) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, err + } + + var active []content.Status + for _, fi := range fis { + p := filepath.Join(s.root, "ingest", fi) + stat, err := s.status(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + // TODO(stevvooe): This is a common error if uploads are being + // completed while making this listing. Need to consider taking a + // lock on the whole store to coordinate this aspect. + // + // Another option is to cleanup downloads asynchronously and + // coordinate this method with the cleanup process. + // + // For now, we just skip them, as they really don't exist. + continue + } + + if filter.Match(adaptStatus(stat)) { + active = append(active, stat) + } + } + + return active, nil +} + +// WalkStatusRefs is used to walk all status references +// Failed status reads will be logged and ignored, if +// this function is called while references are being altered, +// these error messages may be produced. +func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer fp.Close() + + fis, err := fp.Readdirnames(-1) + if err != nil { + return err + } + + for _, fi := range fis { + rf := filepath.Join(s.root, "ingest", fi, "ref") + + ref, err := readFileString(rf) + if err != nil { + log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") + continue + } + + if err := fn(ref); err != nil { + return err + } + } + + return nil +} + +// status works like stat above except uses the path to the ingest. +func (s *store) status(ingestPath string) (content.Status, error) { + dp := filepath.Join(ingestPath, "data") + fi, err := os.Stat(dp) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return content.Status{}, err + } + + ref, err := readFileString(filepath.Join(ingestPath, "ref")) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return content.Status{}, err + } + + startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) + if err != nil { + return content.Status{}, fmt.Errorf("could not read startedat: %w", err) + } + + updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) + if err != nil { + return content.Status{}, fmt.Errorf("could not read updatedat: %w", err) + } + + // because we don't write updatedat on every write, the mod time may + // actually be more up to date. 
+ if fi.ModTime().After(updatedAt) { + updatedAt = fi.ModTime() + } + + return content.Status{ + Ref: ref, + Offset: fi.Size(), + Total: s.total(ingestPath), + UpdatedAt: updatedAt, + StartedAt: startedAt, + }, nil +} + +func adaptStatus(status content.Status) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "ref": + return status.Ref, true + } + + return "", false + }) +} + +// total attempts to resolve the total expected size for the write. +func (s *store) total(ingestPath string) int64 { + totalS, err := readFileString(filepath.Join(ingestPath, "total")) + if err != nil { + return 0 + } + + total, err := strconv.ParseInt(totalS, 10, 64) + if err != nil { + // represents a corrupted file, should probably remove. + return 0 + } + + return total +} + +// Writer begins or resumes the active writer identified by ref. If the writer +// is already in use, an error is returned. Only one writer may be in use per +// ref at a time. +// +// The argument `ref` is used to uniquely identify a long-lived writer transaction. +func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + // TODO(AkihiroSuda): we could create a random string or one calculated based on the context + // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 + if wOpts.Ref == "" { + return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) + } + + if err := s.tryLock(wOpts.Ref); err != nil { + return nil, err + } + + w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) + if err != nil { + s.unlock(wOpts.Ref) + return nil, err + } + + return w, nil // lock is now held by w. +} + +func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) { + path, _, data := s.ingestPaths(ref) + status, err := s.status(path) + if err != nil { + return status, fmt.Errorf("failed reading status of resume write: %w", err) + } + if ref != status.Ref { + // NOTE(stevvooe): This is fairly catastrophic. Either we have some + // layout corruption or a hash collision for the ref key. + return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref) + } + + if total > 0 && status.Total > 0 && total != status.Total { + return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total) + } + + //nolint:dupword + // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes + fp, err := os.Open(data) + if err != nil { + return status, err + } + + p := bufPool.Get().(*[]byte) + status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) + bufPool.Put(p) + fp.Close() + return status, err +} + +// writer provides the main implementation of the Writer method. The caller +// must hold the lock correctly and release on error if there is a problem. +func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { + // TODO(stevvooe): Need to actually store expected here. We have + // code in the service that shouldn't be dealing with this. 
+ if expected != "" { + p, err := s.blobPath(expected) + if err != nil { + return nil, fmt.Errorf("calculating expected blob path for writer: %w", err) + } + if _, err := os.Stat(p); err == nil { + return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists) + } + } + + path, refp, data := s.ingestPaths(ref) + + var ( + digester = digest.Canonical.Digester() + offset int64 + startedAt time.Time + updatedAt time.Time + ) + + foundValidIngest := false + + if err := s.ensureIngestRootOnce(); err != nil { + return nil, err + } + + // ensure that the ingest path has been created. + if err := os.Mkdir(path, 0755); err != nil { + if !os.IsExist(err) { + return nil, err + } + status, err := s.resumeStatus(ref, total, digester) + if err == nil { + foundValidIngest = true + updatedAt = status.UpdatedAt + startedAt = status.StartedAt + total = status.Total + offset = status.Offset + } else { + log.G(ctx).Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) + } + } + + if !foundValidIngest { + startedAt = time.Now() + updatedAt = startedAt + + // the ingest is new, we need to setup the target location. + // write the ref to a file for later use + if err := os.WriteFile(refp, []byte(ref), 0666); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { + return nil, err + } + + if total > 0 { + if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { + return nil, err + } + } + } + + fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, fmt.Errorf("failed to open data file: %w", err) + } + + if _, err := fp.Seek(offset, io.SeekStart); err != nil { + fp.Close() + return nil, fmt.Errorf("could not seek to current write offset: %w", err) + } + + return &writer{ + s: s, + fp: fp, + ref: ref, + path: path, + offset: offset, + total: total, + digester: digester, + startedAt: startedAt, + updatedAt: updatedAt, + }, nil +} + +// Abort an active transaction keyed by ref. If the ingest is active, it will +// be cancelled. Any resources associated with the ingest will be cleaned. +func (s *store) Abort(ctx context.Context, ref string) error { + root := s.ingestRoot(ref) + if err := os.RemoveAll(root); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound) + } + + return err + } + + return nil +} + +func (s *store) blobPath(dgst digest.Digest) (string, error) { + if err := dgst.Validate(); err != nil { + return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) + } + + return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil +} + +func (s *store) ingestRoot(ref string) string { + // we take a digest of the ref to keep the ingest paths constant length. + // Note that this is not the current or potential digest of incoming content. + dgst := digest.FromString(ref) + return filepath.Join(s.root, "ingest", dgst.Encoded()) +} + +// ingestPaths are returned. 
The paths are the following: +// +// - root: entire ingest directory +// - ref: name of the starting ref, must be unique +// - data: file where data is written +func (s *store) ingestPaths(ref string) (string, string, string) { + var ( + fp = s.ingestRoot(ref) + rp = filepath.Join(fp, "ref") + dp = filepath.Join(fp, "data") + ) + + return fp, rp, dp +} + +func (s *store) ensureIngestRoot() error { + return os.MkdirAll(filepath.Join(s.root, "ingest"), 0777) +} + +func readFileString(path string) (string, error) { + p, err := os.ReadFile(path) + return string(p), err +} + +// readFileTimestamp reads a file with just a timestamp present. +func readFileTimestamp(p string) (time.Time, error) { + b, err := os.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) + } + return time.Time{}, err + } + + var t time.Time + if err := t.UnmarshalText(b); err != nil { + return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err) + } + + return t, nil +} + +func writeTimestampFile(p string, t time.Time) error { + b, err := t.MarshalText() + if err != nil { + return err + } + return writeToCompletion(p, b, 0666) +} + +func writeToCompletion(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return fmt.Errorf("create tmp file: %w", err) + } + _, err = f.Write(data) + f.Close() + if err != nil { + return fmt.Errorf("write tmp file: %w", err) + } + err = os.Rename(tmp, path) + if err != nil { + return fmt.Errorf("rename tmp file: %w", err) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go new file mode 100644 index 00000000..7dcc1923 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_bsd.go @@ -0,0 +1,33 @@ +//go:build darwin || freebsd || netbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atimespec.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go new file mode 100644 index 00000000..45dfa999 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_openbsd.go @@ -0,0 +1,33 @@ +//go:build openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
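For illustration only (not part of the vendored diff): opening the local store and walking its blobs. The ./content-root directory is an arbitrary example; blobs live under root/blobs/algorithm/encoded-digest, and in-flight ingests under root/ingest, as the store code above lays out.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/plugins/content/local"
)

func main() {
	cs, err := local.NewStore("./content-root")
	if err != nil {
		panic(err)
	}
	// Walk every committed blob and print its digest and size.
	err = cs.Walk(context.Background(), func(info content.Info) error {
		fmt.Println(info.Digest, info.Size)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```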
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atim.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go new file mode 100644 index 00000000..cb01c91c --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_unix.go @@ -0,0 +1,33 @@ +//go:build linux || solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(st.Atim.Unix()) + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go new file mode 100644 index 00000000..bce84997 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/store_windows.go @@ -0,0 +1,26 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go new file mode 100644 index 00000000..a4c238c4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "testing" + + "github.com/containerd/containerd/v2/core/content" +) + +func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { + tmpdir := t.TempDir() + + cs, err := NewStore(tmpdir) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + return ctx, tmpdir, cs, func() { + cancel() + } +} diff --git a/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go b/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go new file mode 100644 index 00000000..ef33e7d5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/plugins/content/local/writer.go @@ -0,0 +1,226 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/containerd/errdefs" + "github.com/containerd/log" + "github.com/opencontainers/go-digest" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/internal/fsverity" +) + +// writer represents a write transaction against the blob store. +type writer struct { + s *store + fp *os.File // opened data file + path string // path to writer dir + ref string // ref key + offset int64 + total int64 + digester digest.Digester + startedAt time.Time + updatedAt time.Time +} + +func (w *writer) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *writer) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. +// +// Note that writes are unbuffered to the backing file. When writing, it is +// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. 
+func (w *writer) Write(p []byte) (n int, err error) { + n, err = w.fp.Write(p) + w.digester.Hash().Write(p[:n]) + w.offset += int64(len(p)) + w.updatedAt = time.Now() + return n, err +} + +func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Ensure even on error the writer is fully closed + defer w.s.unlock(w.ref) + + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + fp := w.fp + w.fp = nil + + if fp == nil { + return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition) + } + + if err := fp.Sync(); err != nil { + fp.Close() + return fmt.Errorf("sync failed: %w", err) + } + + fi, err := fp.Stat() + closeErr := fp.Close() + if err != nil { + return fmt.Errorf("stat on ingest file failed: %w", err) + } + if closeErr != nil { + return fmt.Errorf("failed to close ingest file: %w", closeErr) + } + + if size > 0 && size != fi.Size() { + return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition) + } + + dgst := w.digester.Digest() + if expected != "" && expected != dgst { + return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition) + } + + var ( + ingest = filepath.Join(w.path, "data") + target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst + ) + + // make sure parent directories of blob exist + if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { + return err + } + + if _, err := os.Stat(target); err == nil { + // collision with the target file! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") + } + return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists) + } + + if err := os.Rename(ingest, target); err != nil { + return err + } + + // Enable content blob integrity verification if supported + + if w.s.integritySupported { + if err := fsverity.Enable(target); err != nil { + log.G(ctx).Warnf("failed to enable integrity for blob %v: %s", target, err.Error()) + } + } + + // Ingest has now been made available in the content store, attempt to complete + // setting metadata but errors should only be logged and not returned since + // the content store cannot be cleanly rolled back. + + commitTime := time.Now() + if err := os.Chtimes(target, commitTime, commitTime); err != nil { + log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time") + } + + // clean up!! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") + } + + if w.s.ls != nil && base.Labels != nil { + if err := w.s.ls.Set(dgst, base.Labels); err != nil { + log.G(ctx).WithField("digest", dgst).Error("failed to set labels") + } + } + + // change to readonly, more important for read, but provides _some_ + // protection from this point on. We use the existing perms with a mask + // only allowing reads honoring the umask on creation. + // + // This removes write and exec, only allowing read per the creation umask. 
+ // + // NOTE: Windows does not support this operation + if runtime.GOOS != "windows" { + if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { + log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly") + } + } + + return nil +} + +// Close the writer, flushing any unwritten data and leaving the progress +// intact. +// +// If one needs to resume the transaction, a new writer can be obtained from +// `Ingester.Writer` using the same key. The write can then be continued +// from where it was left off. +// +// To abandon a transaction completely, first call Close, then `IngestManager.Abort` to +// clean up the associated resources. +func (w *writer) Close() (err error) { + if w.fp != nil { + w.fp.Sync() + err = w.fp.Close() + writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) + w.fp = nil + w.s.unlock(w.ref) + return + } + + return nil +} + +func (w *writer) Truncate(size int64) error { + if size != 0 { + return errors.New("Truncate: unsupported size") + } + w.offset = 0 + w.digester.Hash().Reset() + if _, err := w.fp.Seek(0, io.SeekStart); err != nil { + return err + } + return w.fp.Truncate(0) +} + +func (w *writer) Sync() error { + if w.fp != nil { + return w.fp.Sync() + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/v2/version/version.go b/vendor/github.com/containerd/containerd/v2/version/version.go new file mode 100644 index 00000000..23d820a9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/v2/version/version.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package version + +import "runtime" + +var ( + Name = "containerd" + // Package is filled at linking time + Package = "github.com/containerd/containerd/v2" + + // Version holds the complete version number. Filled in at linking time. + Version = "2.0.4+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" + + // GoVersion is Go tree's version. + GoVersion = runtime.Version() +) + +// ConfigVersion is the current highest supported configuration version. +// This version is used by the main configuration as well as all plugins. +// Any configuration less than this version which has structural changes +// should migrate the configuration structures used by this version. +const ConfigVersion = 3 diff --git a/vendor/github.com/containerd/errdefs/LICENSE b/vendor/github.com/containerd/errdefs/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
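For illustration only (not part of the vendored diff): the ref-keyed ingest flow the writer above implements — open a writer for a ref, stream bytes, then commit against the expected digest, which atomically renames the ingest into blobs/. The ref and payload are example values.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/plugins/content/local"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cs, err := local.NewStore("./content-root")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	data := []byte("hello blob")

	// Only one writer may be open per ref at a time; the store enforces this
	// with the tryLock/unlock helpers shown earlier in the diff.
	w, err := cs.Writer(ctx, content.WithRef("example-ref"))
	if err != nil {
		panic(err)
	}
	defer w.Close() // no-op after a successful Commit

	if _, err := w.Write(data); err != nil {
		panic(err)
	}
	if err := w.Commit(ctx, int64(len(data)), digest.FromBytes(data)); err != nil {
		panic(err)
	}
	fmt.Println("stored", digest.FromBytes(data))
}
```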
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/errdefs/README.md b/vendor/github.com/containerd/errdefs/README.md new file mode 100644 index 00000000..bd418c63 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/README.md @@ -0,0 +1,13 @@ +# errdefs + +A Go package for defining and checking common containerd errors. + +## Project details + +**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). 
+As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/errdefs/errors.go b/vendor/github.com/containerd/errdefs/errors.go new file mode 100644 index 00000000..f654d196 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/errors.go @@ -0,0 +1,443 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with fmt.Errorf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +package errdefs + +import ( + "context" + "errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// These errors map closely to grpc errors. +var ( + ErrUnknown = errUnknown{} + ErrInvalidArgument = errInvalidArgument{} + ErrNotFound = errNotFound{} + ErrAlreadyExists = errAlreadyExists{} + ErrPermissionDenied = errPermissionDenied{} + ErrResourceExhausted = errResourceExhausted{} + ErrFailedPrecondition = errFailedPrecondition{} + ErrConflict = errConflict{} + ErrNotModified = errNotModified{} + ErrAborted = errAborted{} + ErrOutOfRange = errOutOfRange{} + ErrNotImplemented = errNotImplemented{} + ErrInternal = errInternal{} + ErrUnavailable = errUnavailable{} + ErrDataLoss = errDataLoss{} + ErrUnauthenticated = errUnauthorized{} +) + +// cancelled maps to Moby's "ErrCancelled" +type cancelled interface { + Cancelled() +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) || isInterface[cancelled](err) +} + +type errUnknown struct{} + +func (errUnknown) Error() string { return "unknown" } + +func (errUnknown) Unknown() {} + +func (e errUnknown) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// unknown maps to Moby's "ErrUnknown" +type unknown interface { + Unknown() +} + +// IsUnknown returns true if the error is due to an unknown error, +// unhandled condition or unexpected response. 
+func IsUnknown(err error) bool { + return errors.Is(err, errUnknown{}) || isInterface[unknown](err) +} + +type errInvalidArgument struct{} + +func (errInvalidArgument) Error() string { return "invalid argument" } + +func (errInvalidArgument) InvalidParameter() {} + +func (e errInvalidArgument) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// invalidParameter maps to Moby's "ErrInvalidParameter" +type invalidParameter interface { + InvalidParameter() +} + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Is(err, ErrInvalidArgument) || isInterface[invalidParameter](err) +} + +// deadlineExceed maps to Moby's "ErrDeadline" +type deadlineExceeded interface { + DeadlineExceeded() +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) || isInterface[deadlineExceeded](err) +} + +type errNotFound struct{} + +func (errNotFound) Error() string { return "not found" } + +func (errNotFound) NotFound() {} + +func (e errNotFound) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// notFound maps to Moby's "ErrNotFound" +type notFound interface { + NotFound() +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) || isInterface[notFound](err) +} + +type errAlreadyExists struct{} + +func (errAlreadyExists) Error() string { return "already exists" } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type alreadyExists interface { + AlreadyExists() +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) || isInterface[alreadyExists](err) +} + +type errPermissionDenied struct{} + +func (errPermissionDenied) Error() string { return "permission denied" } + +func (errPermissionDenied) Forbidden() {} + +func (e errPermissionDenied) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// forbidden maps to Moby's "ErrForbidden" +type forbidden interface { + Forbidden() +} + +// IsPermissionDenied returns true if the error is due to permission denied +// or forbidden (403) response +func IsPermissionDenied(err error) bool { + return errors.Is(err, ErrPermissionDenied) || isInterface[forbidden](err) +} + +type errResourceExhausted struct{} + +func (errResourceExhausted) Error() string { return "resource exhausted" } + +func (errResourceExhausted) ResourceExhausted() {} + +func (e errResourceExhausted) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type resourceExhausted interface { + ResourceExhausted() +} + +// IsResourceExhausted returns true if the error is due to +// a lack of resources or too many attempts. 
+func IsResourceExhausted(err error) bool { + return errors.Is(err, errResourceExhausted{}) || isInterface[resourceExhausted](err) +} + +type errFailedPrecondition struct{} + +func (e errFailedPrecondition) Error() string { return "failed precondition" } + +func (errFailedPrecondition) FailedPrecondition() {} + +func (e errFailedPrecondition) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type failedPrecondition interface { + FailedPrecondition() +} + +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Is(err, errFailedPrecondition{}) || isInterface[failedPrecondition](err) +} + +type errConflict struct{} + +func (errConflict) Error() string { return "conflict" } + +func (errConflict) Conflict() {} + +func (e errConflict) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// conflict maps to Moby's "ErrConflict" +type conflict interface { + Conflict() +} + +// IsConflict returns true if an operation could not proceed due to +// a conflict. +func IsConflict(err error) bool { + return errors.Is(err, errConflict{}) || isInterface[conflict](err) +} + +type errNotModified struct{} + +func (errNotModified) Error() string { return "not modified" } + +func (errNotModified) NotModified() {} + +func (e errNotModified) WithMessage(msg string) error { + return customMessage{e, msg} +} + +// notModified maps to Moby's "ErrNotModified" +type notModified interface { + NotModified() +} + +// IsNotModified returns true if an operation could not proceed due +// to an object not modified from a previous state. +func IsNotModified(err error) bool { + return errors.Is(err, errNotModified{}) || isInterface[notModified](err) +} + +type errAborted struct{} + +func (errAborted) Error() string { return "aborted" } + +func (errAborted) Aborted() {} + +func (e errAborted) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type aborted interface { + Aborted() +} + +// IsAborted returns true if an operation was aborted. +func IsAborted(err error) bool { + return errors.Is(err, errAborted{}) || isInterface[aborted](err) +} + +type errOutOfRange struct{} + +func (errOutOfRange) Error() string { return "out of range" } + +func (errOutOfRange) OutOfRange() {} + +func (e errOutOfRange) WithMessage(msg string) error { + return customMessage{e, msg} +} + +type outOfRange interface { + OutOfRange() +} + +// IsOutOfRange returns true if an operation could not proceed due +// to data being out of the expected range. 
+func IsOutOfRange(err error) bool {
+	return errors.Is(err, errOutOfRange{}) || isInterface[outOfRange](err)
+}
+
+type errNotImplemented struct{}
+
+func (errNotImplemented) Error() string { return "not implemented" }
+
+func (errNotImplemented) NotImplemented() {}
+
+func (e errNotImplemented) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// notImplemented maps to Moby's "ErrNotImplemented"
+type notImplemented interface {
+	NotImplemented()
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Is(err, errNotImplemented{}) || isInterface[notImplemented](err)
+}
+
+type errInternal struct{}
+
+func (errInternal) Error() string { return "internal" }
+
+func (errInternal) System() {}
+
+func (e errInternal) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// system maps to Moby's "ErrSystem"
+type system interface {
+	System()
+}
+
+// IsInternal returns true if the error is due to an internal or system error
+func IsInternal(err error) bool {
+	return errors.Is(err, errInternal{}) || isInterface[system](err)
+}
+
+type errUnavailable struct{}
+
+func (errUnavailable) Error() string { return "unavailable" }
+
+func (errUnavailable) Unavailable() {}
+
+func (e errUnavailable) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// unavailable maps to Moby's "ErrUnavailable"
+type unavailable interface {
+	Unavailable()
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Is(err, errUnavailable{}) || isInterface[unavailable](err)
+}
+
+type errDataLoss struct{}
+
+func (errDataLoss) Error() string { return "data loss" }
+
+func (errDataLoss) DataLoss() {}
+
+func (e errDataLoss) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// dataLoss maps to Moby's "ErrDataLoss"
+type dataLoss interface {
+	DataLoss()
+}
+
+// IsDataLoss returns true if data during an operation was lost or corrupted
+func IsDataLoss(err error) bool {
+	return errors.Is(err, errDataLoss{}) || isInterface[dataLoss](err)
+}
+
+type errUnauthorized struct{}
+
+func (errUnauthorized) Error() string { return "unauthorized" }
+
+func (errUnauthorized) Unauthorized() {}
+
+func (e errUnauthorized) WithMessage(msg string) error {
+	return customMessage{e, msg}
+}
+
+// unauthorized maps to Moby's "ErrUnauthorized"
+type unauthorized interface {
+	Unauthorized()
+}
+
+// IsUnauthorized returns true if the error indicates that the user was
+// unauthenticated or unauthorized.
+func IsUnauthorized(err error) bool {
+	return errors.Is(err, errUnauthorized{}) || isInterface[unauthorized](err)
+}
+
+func isInterface[T any](err error) bool {
+	for {
+		switch x := err.(type) {
+		case T:
+			return true
+		case customMessage:
+			err = x.err
+		case interface{ Unwrap() error }:
+			err = x.Unwrap()
+			if err == nil {
+				return false
+			}
+		case interface{ Unwrap() []error }:
+			for _, err := range x.Unwrap() {
+				if isInterface[T](err) {
+					return true
+				}
+			}
+			return false
+		default:
+			return false
+		}
+	}
+}
+
+// customMessage is used to provide a defined error with a custom message.
+// The message is not wrapped but can be compared by the `Is(error) bool` interface.
+type customMessage struct { + err error + msg string +} + +func (c customMessage) Is(err error) bool { + return c.err == err +} + +func (c customMessage) As(target any) bool { + return errors.As(c.err, target) +} + +func (c customMessage) Error() string { + return c.msg +} diff --git a/vendor/github.com/containerd/errdefs/resolve.go b/vendor/github.com/containerd/errdefs/resolve.go new file mode 100644 index 00000000..c02d4a73 --- /dev/null +++ b/vendor/github.com/containerd/errdefs/resolve.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import "context" + +// Resolve returns the first error found in the error chain which matches an +// error defined in this package or context error. A raw, unwrapped error is +// returned or ErrUnknown if no matching error is found. +// +// This is useful for determining a response code based on the outermost wrapped +// error rather than the original cause. For example, a not found error deep +// in the code may be wrapped as an invalid argument. When determining status +// code from Is* functions, the depth or ordering of the error is not +// considered. +// +// The search order is depth first, a wrapped error returned from any part of +// the chain from `Unwrap() error` will be returned before any joined errors +// as returned by `Unwrap() []error`. 
+func Resolve(err error) error { + if err == nil { + return nil + } + err = firstError(err) + if err == nil { + err = ErrUnknown + } + return err +} + +func firstError(err error) error { + for { + switch err { + case ErrUnknown, + ErrInvalidArgument, + ErrNotFound, + ErrAlreadyExists, + ErrPermissionDenied, + ErrResourceExhausted, + ErrFailedPrecondition, + ErrConflict, + ErrNotModified, + ErrAborted, + ErrOutOfRange, + ErrNotImplemented, + ErrInternal, + ErrUnavailable, + ErrDataLoss, + ErrUnauthenticated, + context.DeadlineExceeded, + context.Canceled: + return err + } + switch e := err.(type) { + case customMessage: + err = e.err + case unknown: + return ErrUnknown + case invalidParameter: + return ErrInvalidArgument + case notFound: + return ErrNotFound + case alreadyExists: + return ErrAlreadyExists + case forbidden: + return ErrPermissionDenied + case resourceExhausted: + return ErrResourceExhausted + case failedPrecondition: + return ErrFailedPrecondition + case conflict: + return ErrConflict + case notModified: + return ErrNotModified + case aborted: + return ErrAborted + case errOutOfRange: + return ErrOutOfRange + case notImplemented: + return ErrNotImplemented + case system: + return ErrInternal + case unavailable: + return ErrUnavailable + case dataLoss: + return ErrDataLoss + case unauthorized: + return ErrUnauthenticated + case deadlineExceeded: + return context.DeadlineExceeded + case cancelled: + return context.Canceled + case interface{ Unwrap() error }: + err = e.Unwrap() + if err == nil { + return nil + } + case interface{ Unwrap() []error }: + for _, ue := range e.Unwrap() { + if fe := firstError(ue); fe != nil { + return fe + } + } + return nil + case interface{ Is(error) bool }: + for _, target := range []error{ErrUnknown, + ErrInvalidArgument, + ErrNotFound, + ErrAlreadyExists, + ErrPermissionDenied, + ErrResourceExhausted, + ErrFailedPrecondition, + ErrConflict, + ErrNotModified, + ErrAborted, + ErrOutOfRange, + ErrNotImplemented, + ErrInternal, + ErrUnavailable, + ErrDataLoss, + ErrUnauthenticated, + context.DeadlineExceeded, + context.Canceled} { + if e.Is(target) { + return target + } + } + return nil + default: + return nil + } + } +} diff --git a/vendor/github.com/containerd/platforms/.gitattributes b/vendor/github.com/containerd/platforms/.gitattributes new file mode 100644 index 00000000..a0717e4b --- /dev/null +++ b/vendor/github.com/containerd/platforms/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf \ No newline at end of file diff --git a/vendor/github.com/containerd/platforms/.golangci.yml b/vendor/github.com/containerd/platforms/.golangci.yml new file mode 100644 index 00000000..d574fe11 --- /dev/null +++ b/vendor/github.com/containerd/platforms/.golangci.yml @@ -0,0 +1,32 @@ +linters: + enable: + - copyloopvar + - gofmt + - goimports + - gosec + - ineffassign + - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - govet + - dupword # Checks for duplicate words in the source code + disable: + - errcheck + +run: + timeout: 5m + +issues: + exclude-dirs: + - api + - cluster + - design + - docs + - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/platforms/LICENSE b/vendor/github.com/containerd/platforms/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/platforms/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + 
https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
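The errdefs package vendored above is built around standard Go error wrapping: a caller attaches context to one of the sentinels with fmt.Errorf("%w"), and consumers later recover the error class with the Is* predicates or map an arbitrary chain back to a sentinel with Resolve. A minimal sketch of that pattern follows; the findModel helper and its error text are hypothetical illustrations, not part of this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/errdefs"
)

// findModel is a hypothetical lookup used only for illustration. It wraps
// the ErrNotFound sentinel so callers keep the error class while gaining
// context, as the errdefs package documentation suggests.
func findModel(name string) error {
	return fmt.Errorf("model %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	err := findModel("example")

	// IsNotFound walks the wrap chain and also recognizes any error that
	// implements the NotFound() marker interface (e.g. Moby-style errors).
	if errdefs.IsNotFound(err) {
		fmt.Println("treat as 404:", err)
	}

	// Resolve maps the chain back to the matching sentinel, which is
	// convenient when deriving a status code from a wrapped error.
	fmt.Println(errors.Is(errdefs.Resolve(err), errdefs.ErrNotFound)) // true
}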
diff --git a/vendor/github.com/containerd/platforms/README.md b/vendor/github.com/containerd/platforms/README.md
new file mode 100644
index 00000000..2059de77
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/README.md
@@ -0,0 +1,32 @@
+# platforms
+
+A Go package for formatting, normalizing and matching container platforms.
+
+This package is based on the Open Containers Image Spec definition of a [platform](https://github.com/opencontainers/image-spec/blob/main/specs-go/v1/descriptor.go#L52).
+
+## Platform Specifier
+
+While the OCI platform specifications provide a tool for components to
+specify structured information, user input typically doesn't need the full
+context and much can be inferred. To solve this problem, this package introduces
+"specifiers". A specifier has the format
+`<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+operating system or the architecture or both.
+
+An example of a common specifier is `linux/amd64`. If the host has a default
+runtime that matches this, the user can simply provide the component that
+matters. For example, if an image provides `amd64` and `arm64` support, the
+operating system, `linux`, can be inferred, so they only have to provide
+`arm64` or `amd64`. Similar behavior is implemented for operating systems,
+where the architecture may be known but a runtime may support images from
+different operating systems.
+
+## Project details
+
+**platforms** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
\ No newline at end of file
diff --git a/vendor/github.com/containerd/platforms/compare.go b/vendor/github.com/containerd/platforms/compare.go
new file mode 100644
index 00000000..24403f3b
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/compare.go
@@ -0,0 +1,260 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// MatchComparer is able to match and compare platforms to
+// filter and sort platforms.
+type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +type platformVersions struct { + major []int + minor []int +} + +var arm64variantToVersion = map[string]platformVersions{ + "v8": {[]int{8}, []int{0}}, + "v8.0": {[]int{8}, []int{0}}, + "v8.1": {[]int{8}, []int{1}}, + "v8.2": {[]int{8}, []int{2}}, + "v8.3": {[]int{8}, []int{3}}, + "v8.4": {[]int{8}, []int{4}}, + "v8.5": {[]int{8}, []int{5}}, + "v8.6": {[]int{8}, []int{6}}, + "v8.7": {[]int{8}, []int{7}}, + "v8.8": {[]int{8}, []int{8}}, + "v8.9": {[]int{8}, []int{9}}, + "v9": {[]int{9, 8}, []int{0, 5}}, + "v9.0": {[]int{9, 8}, []int{0, 5}}, + "v9.1": {[]int{9, 8}, []int{1, 6}}, + "v9.2": {[]int{9, 8}, []int{2, 7}}, + "v9.3": {[]int{9, 8}, []int{3, 8}}, + "v9.4": {[]int{9, 8}, []int{4, 9}}, + "v9.5": {[]int{9, 8}, []int{5, 9}}, + "v9.6": {[]int{9, 8}, []int{6, 9}}, + "v9.7": {[]int{9, 8}, []int{7, 9}}, +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). +func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + + vector = []specs.Platform{} // Reset vector, the first variant will be added in loop. + arm64Versions, ok := arm64variantToVersion[variant] + if !ok { + break + } + for i, major := range arm64Versions.major { + for minor := arm64Versions.minor[i]; minor >= 0; minor-- { + arm64Variant := "v" + strconv.Itoa(major) + "." + strconv.Itoa(minor) + if minor == 0 { + arm64Variant = "v" + strconv.Itoa(major) + } + vector = append(vector, specs.Platform{ + Architecture: "arm64", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: arm64Variant, + }) + } + } + + // All arm64/v8.x and arm64/v9.x are compatible with arm/v8 (32-bits) and below. + // There's no arm64 v9 variant, so it's normalized to v8. + if strings.HasPrefix(variant, "v8") || strings.HasPrefix(variant, "v9") { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. 
+//
+// For arm64/v9.x, will also match arm64/v9.{0..x-1} and arm64/v8.{0..x+5}
+// For arm64/v8.x, will also match arm64/v8.{0..x-1}
+// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
+// For arm/v7, will also match arm/v6 and arm/v5
+// For arm/v6, will also match arm/v5
+// For amd64, will also match 386
+func Only(platform specs.Platform) MatchComparer {
+	return Ordered(platformVector(Normalize(platform))...)
+}
+
+// OnlyStrict returns a match comparer for a single platform.
+//
+// Unlike Only, OnlyStrict does not match sub platforms.
+// So, "arm/vN" will not match "arm/vM" where M < N,
+// and "amd64" will not also match "386".
+//
+// OnlyStrict matches non-canonical forms.
+// So, "arm64" matches "arm64/v8".
+func OnlyStrict(platform specs.Platform) MatchComparer {
+	return Ordered(Normalize(platform))
+}
+
+// Ordered returns a platform MatchComparer which matches any of the platforms
+// but orders them in the order they are provided.
+func Ordered(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return orderedPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// Any returns a platform MatchComparer which matches any of the platforms
+// with no preference for ordering.
+func Any(platforms ...specs.Platform) MatchComparer {
+	matchers := make([]Matcher, len(platforms))
+	for i := range platforms {
+		matchers[i] = NewMatcher(platforms[i])
+	}
+	return anyPlatformComparer{
+		matchers: matchers,
+	}
+}
+
+// All is a platform MatchComparer which matches all platforms
+// with preference for ordering.
+var All MatchComparer = allPlatformComparer{}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches and the other does not, sort the match first
+	return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/platforms/cpuinfo.go
new file mode 100644
index 00000000..91f50e8c
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo.go
@@ -0,0 +1,43 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"runtime"
+	"sync"
+
+	"github.com/containerd/log"
+)
+
+// Presents the ARM instruction set architecture, e.g. v7, v8.
+// Don't use this value directly; call cpuVariant() instead.
+var cpuVariantValue string
+
+var cpuVariantOnce sync.Once
+
+func cpuVariant() string {
+	cpuVariantOnce.Do(func() {
+		if isArmArch(runtime.GOARCH) {
+			var err error
+			cpuVariantValue, err = getCPUVariant()
+			if err != nil {
+				log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err)
+			}
+		}
+	})
+	return cpuVariantValue
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
new file mode 100644
index 00000000..98c7001f
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_linux.go
@@ -0,0 +1,160 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// getMachineArch retrieves the machine architecture through a system call
+func getMachineArch() (string, error) {
+	var uname unix.Utsname
+	err := unix.Uname(&uname)
+	if err != nil {
+		return "", err
+	}
+
+	arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)])
+
+	return arch, nil
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo
+func getCPUInfo(pattern string) (info string, err error) {
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line. For SMP SoCs, parsing
+	// the first core is enough.
+	scanner := bufio.NewScanner(cpuinfo)
+	for scanner.Scan() {
+		newline := scanner.Text()
+		list := strings.Split(newline, ":")
+
+		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
+			return strings.TrimSpace(list[1]), nil
+		}
+	}
+
+	// Check whether the scanner encountered errors
+	err = scanner.Err()
+	if err != nil {
+		return "", err
+	}
+
+	return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errNotFound)
+}
+
+// getCPUVariantFromArch gets the CPU variant from the arch obtained through a system call
+func getCPUVariantFromArch(arch string) (string, error) {
+
+	var variant string
+
+	arch = strings.ToLower(arch)
+
+	if arch == "aarch64" {
+		variant = "8"
+	} else if arch[0:4] == "armv" && len(arch) >= 5 {
+		// Valid arch format is in form of armvXx
+		switch arch[3:5] {
+		case "v8":
+			variant = "8"
+		case "v7":
+			variant = "7"
+		case "v6":
+			variant = "6"
+		case "v5":
+			variant = "5"
+		case "v4":
+			variant = "4"
+		case "v3":
+			variant = "3"
+		default:
+			variant = "unknown"
+		}
+	} else {
+		return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errInvalidArgument)
+	}
+	return variant, nil
+}
+
+// getCPUVariant returns the CPU variant for ARM.
+// We first try reading the "Cpu architecture" field from /proc/cpuinfo.
+// If we can't find it, we fall back to a system call. This covers running
+// ARM in an emulated environment on an x86 host, where that field is not
+// present in /proc/cpuinfo.
+func getCPUVariant() (string, error) {
+	variant, err := getCPUInfo("Cpu architecture")
+	if err != nil {
+		if errors.Is(err, errNotFound) {
+			// Let's try getting CPU variant from machine architecture
+			arch, err := getMachineArch()
+			if err != nil {
+				return "", fmt.Errorf("failure getting machine architecture: %v", err)
+			}
+
+			variant, err = getCPUVariantFromArch(arch)
+			if err != nil {
+				return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err)
+			}
+		} else {
+			return "", fmt.Errorf("failure getting CPU variant: %v", err)
+		}
+	}
+
+	// handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
+	// https://www.raspberrypi.org/forums/viewtopic.php?t=12614
+	if runtime.GOARCH == "arm" && variant == "7" {
+		model, err := getCPUInfo("model name")
+		if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
+			variant = "6"
+		}
+	}
+
+	switch strings.ToLower(variant) {
+	case "8", "aarch64":
+		variant = "v8"
+	case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
+		variant = "v7"
+	case "6", "6tej":
+		variant = "v6"
+	case "5", "5t", "5te", "5tej":
+		variant = "v5"
+	case "4", "4t":
+		variant = "v4"
+	case "3":
+		variant = "v3"
+	default:
+		variant = "unknown"
+	}
+
+	return variant, nil
+}
diff --git a/vendor/github.com/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/platforms/cpuinfo_other.go
new file mode 100644
index 00000000..97a1fe8a
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/cpuinfo_other.go
@@ -0,0 +1,55 @@
+//go:build !linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" +) + +func getCPUVariant() (string, error) { + + var variant string + + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + } else if runtime.GOOS == "freebsd" { + // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) + // detecting those variants is currently unimplemented + switch runtime.GOARCH { + case "arm64": + variant = "v8" + default: + variant = "unknown" + } + } else { + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errNotImplemented) + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/platforms/database.go b/vendor/github.com/containerd/platforms/database.go new file mode 100644 index 00000000..7a6f0d98 --- /dev/null +++ b/vendor/github.com/containerd/platforms/database.go @@ -0,0 +1,111 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// These function are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. 
+func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64", "amd64": + arch = "amd64" + if variant == "v1" { + variant = "" + } + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8", "v8.0": + variant = "" + case "9", "9.0", "v9.0": + variant = "v9" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/platforms/defaults.go b/vendor/github.com/containerd/platforms/defaults.go new file mode 100644 index 00000000..9d898d60 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults.go @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// DefaultString returns the default string specifier for the platform, +// with [PR#6](https://github.com/containerd/platforms/pull/6) the result +// may now also include the OSVersion from the provided platform specification. +func DefaultString() string { + return FormatAll(DefaultSpec()) +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/platforms/defaults_darwin.go new file mode 100644 index 00000000..72355ca8 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_darwin.go @@ -0,0 +1,44 @@ +//go:build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. 
+func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/platforms/defaults_freebsd.go new file mode 100644 index 00000000..d3fe89e0 --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_freebsd.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + }) +} diff --git a/vendor/github.com/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/platforms/defaults_unix.go new file mode 100644 index 00000000..44acc47e --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_unix.go @@ -0,0 +1,40 @@ +//go:build !windows && !darwin && !freebsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/platforms/defaults_windows.go new file mode 100644 index 00000000..0165adea --- /dev/null +++ b/vendor/github.com/containerd/platforms/defaults_windows.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"fmt"
+	"runtime"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sys/windows"
+)
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+	major, minor, build := windows.RtlGetNtVersionNumbers()
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		OSVersion:    fmt.Sprintf("%d.%d.%d", major, minor, build),
+		// The Variant field will be empty if arch != ARM.
+		Variant: cpuVariant(),
+	}
+}
+
+// Default returns the default matcher for the platform.
+func Default() MatchComparer {
+	return Only(DefaultSpec())
+}
diff --git a/vendor/github.com/containerd/platforms/errors.go b/vendor/github.com/containerd/platforms/errors.go
new file mode 100644
index 00000000..5ad721e7
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/errors.go
@@ -0,0 +1,30 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import "errors"
+
+// These errors mirror the errors defined in [github.com/containerd/containerd/errdefs],
+// however, they are not exported as they are not expected to be used as sentinel
+// errors by consumers of this package.
+//
+//nolint:unused // not all errors are used on all platforms.
+var (
+	errNotFound        = errors.New("not found")
+	errInvalidArgument = errors.New("invalid argument")
+	errNotImplemented  = errors.New("not implemented")
+)
diff --git a/vendor/github.com/containerd/platforms/platform_windows_compat.go b/vendor/github.com/containerd/platforms/platform_windows_compat.go
new file mode 100644
index 00000000..7f3d9966
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platform_windows_compat.go
@@ -0,0 +1,156 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// windowsOSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type windowsOSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information +const ( + // rs5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + rs5 = 17763 + + // v21H2Server corresponds to Windows Server 2022 (ltsc2022). + v21H2Server = 20348 + + // v22H2Win11 corresponds to Windows 11 (2022 Update). + v22H2Win11 = 22621 +) + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + v21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. +// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func checkWindowsHostAndContainerCompat(host, ctr windowsOSVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < v21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} + +func getWindowsOSVersion(osVersionPrefix string) windowsOSVersion { + if strings.Count(osVersionPrefix, ".") < 2 { + return windowsOSVersion{} + } + + major, extra, _ := strings.Cut(osVersionPrefix, ".") + minor, extra, _ := strings.Cut(extra, ".") + build, _, _ := strings.Cut(extra, ".") + + majorVersion, err := strconv.ParseUint(major, 10, 8) + if err != nil { + return windowsOSVersion{} + } + + minorVersion, err := strconv.ParseUint(minor, 10, 8) + if err != nil { + return windowsOSVersion{} + } + buildNumber, err := strconv.ParseUint(build, 10, 16) + if err != nil { + return windowsOSVersion{} + } + + return windowsOSVersion{ + MajorVersion: uint8(majorVersion), + MinorVersion: uint8(minorVersion), + Build: uint16(buildNumber), + } +} + +func winRevision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +type windowsVersionMatcher struct { + windowsOSVersion +} + +func (m windowsVersionMatcher) Match(v string) bool { + if m.isEmpty() || v == "" { + return true + } + osv := getWindowsOSVersion(v) + return checkWindowsHostAndContainerCompat(m.windowsOSVersion, osv) +} + +func (m 
windowsVersionMatcher) isEmpty() bool {
+	return m.MajorVersion == 0 && m.MinorVersion == 0 && m.Build == 0
+}
+
+type windowsMatchComparer struct {
+	Matcher
+}
+
+func (c *windowsMatchComparer) Less(p1, p2 specs.Platform) bool {
+	m1, m2 := c.Match(p1), c.Match(p2)
+	if m1 && m2 {
+		r1, r2 := winRevision(p1.OSVersion), winRevision(p2.OSVersion)
+		return r1 > r2
+	}
+	return m1 && !m2
+}
diff --git a/vendor/github.com/containerd/platforms/platforms.go b/vendor/github.com/containerd/platforms/platforms.go
new file mode 100644
index 00000000..14d65abd
--- /dev/null
+++ b/vendor/github.com/containerd/platforms/platforms.go
@@ -0,0 +1,341 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+//	m, err := Parse("linux")
+//	if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//	if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter for
+// fetching and selecting images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// # Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, this will be
+// images and runtimes, which should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM components should set the Variant field
+// under the circumstances outlined below.
+//
+// # Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host's default
+// runtime matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux`, can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// # Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// # ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// Similarly, the most common arm64 version v8, and most common amd64 version v1
+// are represented without the variant.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"fmt"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+var (
+	specifierRe    = regexp.MustCompile(`^[A-Za-z0-9_.-]+$`)
+	osAndVersionRe = regexp.MustCompile(`^([A-Za-z0-9_-]+)(?:\(([A-Za-z0-9_.-]*)\))?$`)
+)
+
+const osAndVersionFormat = "%s(%s)"
+
+// Platform is a type alias for convenience, so there is no need to import the image-spec package everywhere.
+type Platform = specs.Platform
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	m := &matcher{
+		Platform: Normalize(platform),
+	}
+
+	if platform.OS == "windows" {
+		m.osvM = &windowsVersionMatcher{
+			windowsOSVersion: getWindowsOSVersion(platform.OSVersion),
+		}
+		// In prior versions, on windows, the returned matcher implemented a
+		// MatchComparer interface.
+		// This preserves that behavior for backwards compatibility.
+		//
+		// TODO: This isn't actually used in this package, except for a test case,
+		// which may have been an unintended side effect of some refactor.
+		// It was likely intended to be used in `Ordered`, but it is not, since
+		// the `Less` implemented here ends up getting masked by the wrapping.
+		if runtime.GOOS == "windows" {
+			return &windowsMatchComparer{m}
+		}
+	}
+	return m
+}
+
+type osVerMatcher interface {
+	Match(string) bool
+}
+
+type matcher struct {
+	specs.Platform
+	osvM osVerMatcher
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant &&
+		m.matchOSVersion(platform)
+}
+
+func (m *matcher) matchOSVersion(platform specs.Platform) bool {
+	if m.osvM != nil {
+		return m.osvM.Match(platform.OSVersion)
+	}
+	return true
+}
+
+func (m *matcher) String() string {
+	return FormatAll(m.Platform)
+}
+
+// ParseAll parses a list of platform specifiers into a list of platforms.
+func ParseAll(specifiers []string) ([]specs.Platform, error) {
+	platforms := make([]specs.Platform, len(specifiers))
+	for i, s := range specifiers {
+		p, err := Parse(s)
+		if err != nil {
+			return nil, fmt.Errorf("invalid platform %s: %w", s, err)
+		}
+		platforms[i] = p
+	}
+	return platforms, nil
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format
+// `<os>[(<OSVersion>)]|<arch>|<os>[(<OSVersion>)]/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. The OSVersion can be part of the OS like `windows(10.0.17763)`.
+// When an OSVersion is specified, specs.Platform.OSVersion is populated with
+// that value; otherwise it is left empty.
+// If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errInvalidArgument)
+	}
+
+	// Limit to 4 elements to prevent unbounded split
+	parts := strings.SplitN(specifier, "/", 4)
+
+	var p specs.Platform
+	for i, part := range parts {
+		if i == 0 {
+			// The first element is the `<os>[(<OSVersion>)]` component
+			osVer := osAndVersionRe.FindStringSubmatch(part)
+			if osVer == nil {
+				return specs.Platform{}, fmt.Errorf("%q is an invalid OS component of %q: OSAndVersion specifier component must match %q: %w", part, specifier, osAndVersionRe.String(), errInvalidArgument)
+			}
+
+			p.OS = normalizeOS(osVer[1])
+			p.OSVersion = osVer[2]
+		} else {
+			if !specifierRe.MatchString(part) {
+				return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errInvalidArgument)
+			}
+		}
+	}
+
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS (with or
+		// without the optional OSVersion specified) and look it up.
+		// If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
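+		// For example (illustrative): Parse("linux") on an amd64 host yields
+		// linux/amd64, while Parse("arm64") yields <GOOS>/arm64.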
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant() != "v7" {
+				p.Variant = cpuVariant()
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errInvalidArgument)
+	case 2:
+		// In this case, we treat as a regular OS[(OSVersion)]/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errInvalidArgument)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// It simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// FormatAll returns a string specifier that also includes the OSVersion from the
+// provided platform specification.
+func FormatAll(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	if platform.OSVersion != "" {
+		OSAndVersion := fmt.Sprintf(osAndVersionFormat, platform.OS, platform.OSVersion)
+		return path.Join(OSAndVersion, platform.Architecture, platform.Variant)
+	}
+	return path.Join(platform.OS, platform.Architecture, platform.Variant)
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
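+// For instance, Normalize(Platform{OS: "macOS", Architecture: "aarch64"})
+// yields Platform{OS: "darwin", Architecture: "arm64"} (illustrative values).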
+func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + + return platform +} diff --git a/vendor/github.com/docker/model-runner/pkg/diskusage/diskusage.go b/vendor/github.com/docker/model-runner/pkg/diskusage/diskusage.go new file mode 100644 index 00000000..e2ba3ff1 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/diskusage/diskusage.go @@ -0,0 +1,24 @@ +package diskusage + +import ( + "io/fs" + "path/filepath" +) + +func Size(path string) (int64, error) { + var size int64 + err := filepath.WalkDir(path, func(_ string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.Type().IsRegular() { + info, err := d.Info() + if err != nil { + return err + } + size += info.Size() + } + return nil + }) + return size, err +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backend.go b/vendor/github.com/docker/model-runner/pkg/inference/backend.go index b5eff256..48676626 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/backend.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/backend.go @@ -29,6 +29,11 @@ func (m BackendMode) String() string { } } +type BackendConfiguration struct { + ContextSize int64 + RawFlags []string +} + // Backend is the interface implemented by inference engine backends. Backend // implementations need not be safe for concurrent invocation of the following // methods, though their underlying server implementations do need to support @@ -66,7 +71,9 @@ type Backend interface { // to be loaded. Backends should not load multiple models at once and should // instead load only the specified model. Backends should still respond to // OpenAI API requests for other models with a 421 error code. - Run(ctx context.Context, socket, model string, mode BackendMode) error + Run(ctx context.Context, socket, model string, mode BackendMode, config *BackendConfiguration) error // Status returns a description of the backend's state. Status() string + // GetDiskUsage returns the disk usage of the backend. 
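+	// (Backends typically compute this by walking their storage path, e.g.
+	// via pkg/diskusage.Size, as the llama.cpp backend does.)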
+ GetDiskUsage() (int64, error) } diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download.go new file mode 100644 index 00000000..24b23a4f --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download.go @@ -0,0 +1,200 @@ +package llamacpp + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + + "github.com/docker/model-runner/pkg/internal/dockerhub" + "github.com/docker/model-runner/pkg/logging" +) + +const ( + hubNamespace = "docker" + hubRepo = "docker-model-backend-llamacpp" +) + +var ( + ShouldUseGPUVariant bool + ShouldUseGPUVariantLock sync.Mutex + ShouldUpdateServer = true + ShouldUpdateServerLock sync.Mutex + DesiredServerVersion = "latest" + DesiredServerVersionLock sync.Mutex + errLlamaCppUpToDate = errors.New("bundled llama.cpp version is up to date, no need to update") + errLlamaCppUpdateDisabled = errors.New("llama.cpp auto-updated is disabled") +) + +func GetDesiredServerVersion() string { + DesiredServerVersionLock.Lock() + defer DesiredServerVersionLock.Unlock() + return DesiredServerVersion +} + +func SetDesiredServerVersion(version string) { + DesiredServerVersionLock.Lock() + defer DesiredServerVersionLock.Unlock() + DesiredServerVersion = version +} + +func (l *llamaCpp) downloadLatestLlamaCpp(ctx context.Context, log logging.Logger, httpClient *http.Client, + llamaCppPath, vendoredServerStoragePath, desiredVersion, desiredVariant string, +) error { + ShouldUpdateServerLock.Lock() + shouldUpdateServer := ShouldUpdateServer + ShouldUpdateServerLock.Unlock() + if !shouldUpdateServer { + log.Infof("downloadLatestLlamaCpp: update disabled") + return errLlamaCppUpdateDisabled + } + + log.Infof("downloadLatestLlamaCpp: %s, %s, %s, %s", desiredVersion, desiredVariant, vendoredServerStoragePath, llamaCppPath) + desiredTag := desiredVersion + "-" + desiredVariant + url := fmt.Sprintf("https://hub.docker.com/v2/namespaces/%s/repositories/%s/tags/%s", hubNamespace, hubRepo, desiredTag) + resp, err := httpClient.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("failed to read response body: %w", err) + } + + // https://docs.docker.com/reference/api/hub/latest/#tag/repositories/paths/~1v2~1namespaces~1%7Bnamespace%7D~1repositories~1%7Brepository%7D~1tags~1%7Btag%7D/get + var response struct { + Name string `json:"name"` + Digest string `json:"digest"` + } + + if err := json.Unmarshal(body, &response); err != nil { + return fmt.Errorf("failed to unmarshal response body: %w", err) + } + + var latest string + if response.Name == desiredTag { + latest = response.Digest + } + if latest == "" { + return fmt.Errorf("could not find the %s tag", desiredTag) + } + + bundledVersionFile := filepath.Join(vendoredServerStoragePath, "com.docker.llama-server.digest") + currentVersionFile := filepath.Join(filepath.Dir(llamaCppPath), ".llamacpp_version") + + data, err := os.ReadFile(bundledVersionFile) + if err != nil { + return fmt.Errorf("failed to read bundled llama.cpp version: %w", err) + } else if strings.TrimSpace(string(data)) == latest { + l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s", + desiredTag, latest, getLlamaCppVersion(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server"))) + return 
errLlamaCppUpToDate + } + + data, err = os.ReadFile(currentVersionFile) + if err != nil { + log.Warnf("failed to read current llama.cpp version: %v", err) + log.Warnf("proceeding to update llama.cpp binary") + } else if strings.TrimSpace(string(data)) == latest { + log.Infoln("current llama.cpp version is already up to date") + if _, err := os.Stat(llamaCppPath); err == nil { + l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s", + desiredTag, latest, getLlamaCppVersion(log, llamaCppPath)) + return nil + } + log.Infoln("llama.cpp binary must be updated, proceeding to update it") + } else { + log.Infof("current llama.cpp version is outdated: %s vs %s, proceeding to update it", strings.TrimSpace(string(data)), latest) + } + + image := fmt.Sprintf("registry-1.docker.io/%s/%s@%s", hubNamespace, hubRepo, latest) + downloadDir, err := os.MkdirTemp("", "llamacpp-install") + if err != nil { + return fmt.Errorf("could not create temporary directory: %w", err) + } + defer os.RemoveAll(downloadDir) + + l.status = fmt.Sprintf("downloading %s (%s) variant of llama.cpp", desiredTag, latest) + if err := extractFromImage(ctx, log, image, runtime.GOOS, runtime.GOARCH, downloadDir); err != nil { + return fmt.Errorf("could not extract image: %w", err) + } + + if err := os.RemoveAll(filepath.Dir(llamaCppPath)); err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to clear inference binary dir: %w", err) + } + if err := os.RemoveAll(filepath.Join(filepath.Dir(filepath.Dir(llamaCppPath)), "lib")); err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to clear inference library dir: %w", err) + } + + if err := os.MkdirAll(filepath.Dir(filepath.Dir(llamaCppPath)), 0o755); err != nil { + return fmt.Errorf("could not create directory for llama.cpp artifacts: %w", err) + } + + rootDir := fmt.Sprintf("com.docker.llama-server.native.%s.%s.%s", runtime.GOOS, desiredVariant, runtime.GOARCH) + if err := os.Rename(filepath.Join(downloadDir, rootDir, "bin"), filepath.Dir(llamaCppPath)); err != nil { + return fmt.Errorf("could not move llama.cpp binary: %w", err) + } + if err := os.Chmod(llamaCppPath, 0o755); err != nil { + return fmt.Errorf("could not chmod llama.cpp binary: %w", err) + } + + libDir := filepath.Join(downloadDir, rootDir, "lib") + fi, err := os.Stat(libDir) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return fmt.Errorf("failed to stat llama.cpp lib dir: %w", err) + } + if err == nil && fi.IsDir() { + if err := os.Rename(libDir, filepath.Join(filepath.Dir(filepath.Dir(llamaCppPath)), "lib")); err != nil { + return fmt.Errorf("could not move llama.cpp libs: %w", err) + } + } + + log.Infoln("successfully updated llama.cpp binary") + l.status = fmt.Sprintf("running llama.cpp %s (%s) version: %s", desiredTag, latest, getLlamaCppVersion(log, llamaCppPath)) + log.Infoln(l.status) + + if err := os.WriteFile(currentVersionFile, []byte(latest), 0o644); err != nil { + log.Warnf("failed to save llama.cpp version: %v", err) + } + + return nil +} + +func extractFromImage(ctx context.Context, log logging.Logger, image, requiredOs, requiredArch, destination string) error { + log.Infof("Extracting image %q to %q", image, destination) + tmpDir, err := os.MkdirTemp("", "docker-tar-extract") + if err != nil { + return err + } + imageTar := filepath.Join(tmpDir, "save.tar") + if err := dockerhub.PullPlatform(ctx, image, imageTar, requiredOs, requiredArch); err != nil { + return err + } + return dockerhub.Extract(imageTar, requiredArch, requiredOs, destination) +} 
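+
+// Note: getLlamaCppVersion below parses `--version` output of the form
+// "version: 4821 (b4a2f7c)" and returns the short commit hash, e.g. "b4a2f7c"
+// (illustrative values).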
+ +func getLlamaCppVersion(log logging.Logger, llamaCpp string) string { + output, err := exec.Command(llamaCpp, "--version").CombinedOutput() + if err != nil { + log.Warnf("could not get llama.cpp version: %v", err) + return "unknown" + } + re := regexp.MustCompile(`version: \d+ \((\w+)\)`) + matches := re.FindStringSubmatch(string(output)) + if len(matches) == 2 { + return matches[1] + } + log.Warnf("failed to parse llama.cpp version from output:\n%s", strings.TrimSpace(string(output))) + return "unknown" +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_darwin.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_darwin.go new file mode 100644 index 00000000..a1c0c9d2 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_darwin.go @@ -0,0 +1,17 @@ +package llamacpp + +import ( + "context" + "net/http" + + "github.com/docker/model-runner/pkg/logging" +) + +func (l *llamaCpp) ensureLatestLlamaCpp(ctx context.Context, log logging.Logger, httpClient *http.Client, + llamaCppPath, vendoredServerStoragePath string, +) error { + desiredVersion := GetDesiredServerVersion() + desiredVariant := "metal" + return l.downloadLatestLlamaCpp(ctx, log, httpClient, llamaCppPath, vendoredServerStoragePath, desiredVersion, + desiredVariant) +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_linux.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_linux.go new file mode 100644 index 00000000..2b7d55ff --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_linux.go @@ -0,0 +1,18 @@ +package llamacpp + +import ( + "context" + "fmt" + "net/http" + "path/filepath" + + "github.com/docker/model-runner/pkg/logging" +) + +func (l *llamaCpp) ensureLatestLlamaCpp(_ context.Context, log logging.Logger, _ *http.Client, + _, vendoredServerStoragePath string, +) error { + l.status = fmt.Sprintf("running llama.cpp version: %s", + getLlamaCppVersion(log, filepath.Join(vendoredServerStoragePath, "com.docker.llama-server"))) + return errLlamaCppUpdateDisabled +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_windows.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_windows.go new file mode 100644 index 00000000..d1a84b35 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/download_windows.go @@ -0,0 +1,46 @@ +package llamacpp + +import ( + "context" + "fmt" + "net/http" + "path/filepath" + "runtime" + + "github.com/docker/model-runner/pkg/logging" +) + +func (l *llamaCpp) ensureLatestLlamaCpp(ctx context.Context, log logging.Logger, httpClient *http.Client, + llamaCppPath, vendoredServerStoragePath string, +) error { + nvGPUInfoBin := filepath.Join(vendoredServerStoragePath, "com.docker.nv-gpu-info.exe") + var canUseCUDA11, canUseOpenCL bool + var err error + ShouldUseGPUVariantLock.Lock() + defer ShouldUseGPUVariantLock.Unlock() + if ShouldUseGPUVariant { + if runtime.GOARCH == "amd64" { + canUseCUDA11, err = hasCUDA11CapableGPU(ctx, nvGPUInfoBin) + if err != nil { + l.status = fmt.Sprintf("failed to check CUDA 11 capability: %v", err) + return fmt.Errorf("failed to check CUDA 11 capability: %w", err) + } + } else if runtime.GOARCH == "arm64" { + canUseOpenCL, err = hasOpenCL() + if err != nil { + l.status = fmt.Sprintf("failed to check OpenCL capability: %v", err) + return 
fmt.Errorf("failed to check OpenCL capability: %w", err) + } + } + } + desiredVersion := GetDesiredServerVersion() + desiredVariant := "cpu" + if canUseCUDA11 { + desiredVariant = "cuda" + } else if canUseOpenCL { + desiredVariant = "opencl" + } + l.status = fmt.Sprintf("looking for updates for %s variant", desiredVariant) + return l.downloadLatestLlamaCpp(ctx, log, httpClient, llamaCppPath, vendoredServerStoragePath, desiredVersion, + desiredVariant) +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_notwindows.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_notwindows.go new file mode 100644 index 00000000..d20748ef --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_notwindows.go @@ -0,0 +1,7 @@ +//go:build !windows + +package llamacpp + +import "context" + +func CanUseGPU(context.Context, string) (bool, error) { return false, nil } diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_windows.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_windows.go new file mode 100644 index 00000000..e0bb0f64 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/gpuinfo_windows.go @@ -0,0 +1,112 @@ +package llamacpp + +import ( + "bufio" + "context" + "errors" + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/jaypipes/ghw" +) + +func hasNVIDIAGPU() (bool, error) { + gpus, err := ghw.GPU() + if err != nil { + return false, err + } + for _, gpu := range gpus.GraphicsCards { + if strings.ToLower(gpu.DeviceInfo.Vendor.Name) == "nvidia" { + return true, nil + } + } + return false, nil +} + +func hasCUDA11CapableGPU(ctx context.Context, nvGPUInfoBin string) (bool, error) { + nvGPU, err := hasNVIDIAGPU() + if !nvGPU || err != nil { + return false, err + } + cmd := exec.CommandContext(ctx, nvGPUInfoBin) + out, err := cmd.CombinedOutput() + if err != nil { + return false, err + } + sc := bufio.NewScanner(strings.NewReader(string(out))) + for sc.Scan() { + version, found := strings.CutPrefix(sc.Text(), "driver version:") + if found { + version = strings.TrimSpace(version) + if len(version) != 5 { + return false, fmt.Errorf("unexpected NVIDIA driver version format: %s", version) + } + major, err := strconv.Atoi(version[:3]) + if err != nil { + return false, fmt.Errorf("unexpected NVIDIA driver version format: %s", version) + } + minor, err := strconv.Atoi(version[3:5]) + if err != nil { + return false, fmt.Errorf("unexpected NVIDIA driver version format: %s", version) + } + return major > 452 || (major == 452 && minor >= 39), nil + } + } + return false, nil +} + +func hasSupportedAdrenoGPU() (bool, error) { + gpus, err := ghw.GPU() + if err != nil { + return false, err + } + for _, gpu := range gpus.GraphicsCards { + isAdrenoFamily := strings.Contains(gpu.DeviceInfo.Product.Name, "Adreno") || + strings.Contains(gpu.DeviceInfo.Product.Name, "Qualcomm") + if isAdrenoFamily { + // llama.cpp will detect / classify a limited set of Adreno GPU + // versions, but it won't actually require a specific version, even + // though some, e.g. the 6xx series, won't work. Since we'll have + // the ability disable GPU support, we'll allow the model runner to + // try optimistically. 
+ return true, nil + } + } + return false, nil +} + +func hasOpenCL() (bool, error) { + // We compile our llama.cpp backend with Adreno-specific kernels, so for now + // we don't support OpenCL on other GPUs. + adrenoGPU, err := hasSupportedAdrenoGPU() + if !adrenoGPU || err != nil { + return false, err + } + + // Check for an OpenCL implementation. + opencl, err := syscall.LoadLibrary("OpenCL.dll") + if err != nil { + if errors.Is(err, syscall.ERROR_MOD_NOT_FOUND) { + return false, nil + } + return false, fmt.Errorf("unable to load OpenCL DLL: %w", err) + } + syscall.FreeLibrary(opencl) + return true, nil +} + +func CanUseGPU(ctx context.Context, nvGPUInfoBin string) (bool, error) { + // We don't ship com.docker.nv-gpu-info.exe on Windows/ARM64 at the moment, + // so skip the CUDA check there for now. The OpenCL check is portable. + if runtime.GOARCH == "amd64" { + haveCUDA11GPU, err := hasCUDA11CapableGPU(ctx, nvGPUInfoBin) + if haveCUDA11GPU || err != nil { + return haveCUDA11GPU, err + } + } + return hasOpenCL() +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go new file mode 100644 index 00000000..930535da --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp.go @@ -0,0 +1,221 @@ +package llamacpp + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/docker/model-runner/pkg/diskusage" + "github.com/docker/model-runner/pkg/inference" + "github.com/docker/model-runner/pkg/inference/config" + "github.com/docker/model-runner/pkg/inference/models" + "github.com/docker/model-runner/pkg/logging" + "github.com/docker/model-runner/pkg/tailbuffer" +) + +const ( + // Name is the backend name. + Name = "llama.cpp" +) + +// llamaCpp is the llama.cpp-based backend implementation. +type llamaCpp struct { + // log is the associated logger. + log logging.Logger + // modelManager is the shared model manager. + modelManager *models.Manager + // serverLog is the logger to use for the llama.cpp server process. + serverLog logging.Logger + updatedLlamaCpp bool + // vendoredServerStoragePath is the parent path of the vendored version of com.docker.llama-server. + vendoredServerStoragePath string + // updatedServerStoragePath is the parent path of the updated version of com.docker.llama-server. + // It is also where updates will be stored when downloaded. + updatedServerStoragePath string + // status is the state in which the llama.cpp backend is in. + status string + // config is the configuration for the llama.cpp backend. + config config.BackendConfig +} + +// New creates a new llama.cpp-based backend. +func New( + log logging.Logger, + modelManager *models.Manager, + serverLog logging.Logger, + vendoredServerStoragePath string, + updatedServerStoragePath string, + conf config.BackendConfig, +) (inference.Backend, error) { + // If no config is provided, use the default configuration + if conf == nil { + conf = NewDefaultLlamaCppConfig() + } + + return &llamaCpp{ + log: log, + modelManager: modelManager, + serverLog: serverLog, + vendoredServerStoragePath: vendoredServerStoragePath, + updatedServerStoragePath: updatedServerStoragePath, + config: conf, + }, nil +} + +// Name implements inference.Backend.Name. 
+func (l *llamaCpp) Name() string {
+	return Name
+}
+
+// UsesExternalModelManagement implements
+// inference.Backend.UsesExternalModelManagement.
+func (l *llamaCpp) UsesExternalModelManagement() bool {
+	return false
+}
+
+// Install implements inference.Backend.Install.
+func (l *llamaCpp) Install(ctx context.Context, httpClient *http.Client) error {
+	l.updatedLlamaCpp = false
+
+	// We'll likely never support this backend on Intel Macs, and on Windows
+	// we only support the amd64 and arm64 architectures.
+	if (runtime.GOOS == "darwin" && runtime.GOARCH == "amd64") ||
+		(runtime.GOOS == "windows" && !(runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64")) {
+		return errors.New("platform not supported")
+	}
+
+	llamaServerBin := "com.docker.llama-server"
+	if runtime.GOOS == "windows" {
+		llamaServerBin = "com.docker.llama-server.exe"
+	}
+
+	l.status = "installing"
+
+	// Temporary workaround for dynamically downloading llama.cpp from Docker Hub.
+	// Internet access and an available docker/docker-model-backend-llamacpp:latest on Docker Hub are required.
+	// Even if docker/docker-model-backend-llamacpp:latest has been downloaded before, we still require its
+	// digest to be equal to the one on Docker Hub.
+	llamaCppPath := filepath.Join(l.updatedServerStoragePath, llamaServerBin)
+	if err := l.ensureLatestLlamaCpp(ctx, l.log, httpClient, llamaCppPath, l.vendoredServerStoragePath); err != nil {
+		l.log.Infof("failed to ensure latest llama.cpp: %v\n", err)
+		if !(errors.Is(err, errLlamaCppUpToDate) || errors.Is(err, errLlamaCppUpdateDisabled)) {
+			l.status = fmt.Sprintf("failed to install llama.cpp: %v", err)
+		}
+		if errors.Is(err, context.Canceled) {
+			return err
+		}
+	} else {
+		l.updatedLlamaCpp = true
+	}
+
+	return nil
+}
+
+// Run implements inference.Backend.Run.
+func (l *llamaCpp) Run(ctx context.Context, socket, model string, mode inference.BackendMode, config *inference.BackendConfiguration) error {
+	modelPath, err := l.modelManager.GetModelPath(model)
+	l.log.Infof("Model path: %s", modelPath)
+	if err != nil {
+		return fmt.Errorf("failed to get model path: %w", err)
+	}
+
+	if err := os.RemoveAll(socket); err != nil && !errors.Is(err, fs.ErrNotExist) {
+		l.log.Warnf("failed to remove socket file %s: %v\n", socket, err)
+		l.log.Warnln("llama.cpp may not be able to start")
+	}
+
+	binPath := l.vendoredServerStoragePath
+	if l.updatedLlamaCpp {
+		binPath = l.updatedServerStoragePath
+	}
+
+	args := l.config.GetArgs(modelPath, socket, mode)
+
+	if config != nil {
+		if config.ContextSize >= 0 {
+			args = append(args, "--ctx-size", strconv.Itoa(int(config.ContextSize)))
+		}
+		args = append(args, config.RawFlags...)
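+		// e.g. ContextSize 8192 plus RawFlags ["--no-mmap"] appends
+		// "--ctx-size 8192 --no-mmap" to the base args (illustrative values).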
+	}
+
+	l.log.Infof("llamaCppArgs: %v", args)
+	llamaCppProcess := exec.CommandContext(
+		ctx,
+		filepath.Join(binPath, "com.docker.llama-server"),
+		args...,
+	)
+	llamaCppProcess.Cancel = func() error {
+		if runtime.GOOS == "windows" {
+			return llamaCppProcess.Process.Kill()
+		}
+		return llamaCppProcess.Process.Signal(os.Interrupt)
+	}
+	tailBuf := tailbuffer.NewTailBuffer(1024)
+	serverLogStream := l.serverLog.Writer()
+	out := io.MultiWriter(serverLogStream, tailBuf)
+	llamaCppProcess.Stdout = serverLogStream
+	llamaCppProcess.Stderr = out
+
+	if err := llamaCppProcess.Start(); err != nil {
+		return fmt.Errorf("unable to start llama.cpp: %w", err)
+	}
+
+	llamaCppErrors := make(chan error, 1)
+	go func() {
+		llamaCppErr := llamaCppProcess.Wait()
+		serverLogStream.Close()
+
+		errOutput := new(strings.Builder)
+		if _, err := io.Copy(errOutput, tailBuf); err != nil {
+			l.log.Warnf("failed to read server output tail: %v", err)
+		}
+
+		if len(errOutput.String()) != 0 {
+			llamaCppErr = fmt.Errorf("llama.cpp exit status: %w\nwith output: %s", llamaCppErr, errOutput.String())
+		} else {
+			llamaCppErr = fmt.Errorf("llama.cpp exit status: %w", llamaCppErr)
+		}
+
+		llamaCppErrors <- llamaCppErr
+		close(llamaCppErrors)
+		if err := os.Remove(socket); err != nil && !errors.Is(err, fs.ErrNotExist) {
+			l.log.Warnf("failed to remove socket file %s on exit: %v\n", socket, err)
+		}
+	}()
+	defer func() {
+		<-llamaCppErrors
+	}()
+
+	select {
+	case <-ctx.Done():
+		return nil
+	case llamaCppErr := <-llamaCppErrors:
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+		return fmt.Errorf("llama.cpp terminated unexpectedly: %w", llamaCppErr)
+	}
+}
+
+func (l *llamaCpp) Status() string {
+	return l.status
+}
+
+func (l *llamaCpp) GetDiskUsage() (int64, error) {
+	size, err := diskusage.Size(l.updatedServerStoragePath)
+	if err != nil {
+		return 0, fmt.Errorf("error while getting store size: %v", err)
+	}
+	return size, nil
+}
diff --git a/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go
new file mode 100644
index 00000000..767cbb17
--- /dev/null
+++ b/vendor/github.com/docker/model-runner/pkg/inference/backends/llamacpp/llamacpp_config.go
@@ -0,0 +1,59 @@
+package llamacpp
+
+import (
+	"runtime"
+	"strconv"
+
+	"github.com/docker/model-runner/pkg/inference"
+)
+
+// Config is the configuration for the llama.cpp backend.
+type Config struct {
+	// Args are the base arguments that are always included.
+	Args []string
+}
+
+// NewDefaultLlamaCppConfig creates a new Config with default values.
+func NewDefaultLlamaCppConfig() *Config {
+	args := []string{"--jinja", "-ngl", "100"}
+
+	// Special case for Windows ARM64
+	if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" {
+		// Using a thread count equal to core count results in bad performance, and there seems to be little to no gain
+		// in going beyond core_count/2.
+		if !containsArg(args, "--threads") {
+			nThreads := min(2, runtime.NumCPU()/2)
+			args = append(args, "--threads", strconv.Itoa(nThreads))
+		}
+	}
+
+	return &Config{
+		Args: args,
+	}
+}
+
+// GetArgs implements BackendConfig.GetArgs.
+func (c *Config) GetArgs(modelPath, socket string, mode inference.BackendMode) []string {
+	// Start with a copy of the base arguments from Config
+	args := append([]string{}, c.Args...)
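+	// (A fresh slice is allocated so the per-call additions below don't
+	// mutate the backing array shared with c.Args.)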
+ + // Add model and socket arguments + args = append(args, "--model", modelPath, "--host", socket) + + // Add mode-specific arguments + if mode == inference.BackendModeEmbedding { + args = append(args, "--embeddings") + } + + return args +} + +// containsArg checks if the given argument is already in the args slice. +func containsArg(args []string, arg string) bool { + for _, a := range args { + if a == arg { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/config/config.go b/vendor/github.com/docker/model-runner/pkg/inference/config/config.go new file mode 100644 index 00000000..6d05d1af --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/config/config.go @@ -0,0 +1,15 @@ +package config + +import ( + "github.com/docker/model-runner/pkg/inference" +) + +// BackendConfig is the interface implemented by backend configurations. +// It provides methods to get command-line arguments for a backend based on +// the model path, socket, and mode. +type BackendConfig interface { + // GetArgs returns the command-line arguments for the backend. + // It takes the model path, socket, and mode as input and returns + // the appropriate arguments for the backend. + GetArgs(modelPath, socket string, mode inference.BackendMode) []string +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/cors.go b/vendor/github.com/docker/model-runner/pkg/inference/cors.go new file mode 100644 index 00000000..1bd468f2 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/cors.go @@ -0,0 +1,69 @@ +package inference + +import ( + "net/http" + "os" + "strings" +) + +// CorsMiddleware handles CORS and OPTIONS preflight requests with optional allowedOrigins. +// If allowedOrigins is nil or empty, it falls back to getAllowedOrigins(). +func CorsMiddleware(allowedOrigins []string, next http.Handler) http.Handler { + if len(allowedOrigins) == 0 { + allowedOrigins = getAllowedOrigins() + } + + // Explicitly disable all origins. + if allowedOrigins == nil { + return next + } + + allowAll := len(allowedOrigins) == 1 && allowedOrigins[0] == "*" + allowedSet := make(map[string]struct{}, len(allowedOrigins)) + for _, o := range allowedOrigins { + allowedSet[o] = struct{}{} + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if origin := r.Header.Get("Origin"); origin != "" && (allowAll || originAllowed(origin, allowedSet)) { + w.Header().Set("Access-Control-Allow-Origin", origin) + } + + // Handle OPTIONS requests. + if r.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Credentials", "true") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST") + w.Header().Set("Access-Control-Allow-Headers", "*") + w.WriteHeader(http.StatusOK) + return + } + + next.ServeHTTP(w, r) + }) +} + +func originAllowed(origin string, allowedSet map[string]struct{}) bool { + _, ok := allowedSet[origin] + return ok +} + +// getAllowedOrigins retrieves allowed origins from the DMR_ORIGINS environment variable. +// If the variable is not set it returns nil, indicating no origins are allowed. 
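+// For example, DMR_ORIGINS="http://localhost:3000, https://app.example.com"
+// yields those two origins, with surrounding whitespace trimmed (illustrative
+// value).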
+func getAllowedOrigins() (origins []string) { + dmrOrigins := os.Getenv("DMR_ORIGINS") + if dmrOrigins == "" { + return nil + } + + for _, o := range strings.Split(dmrOrigins, ",") { + if trimmed := strings.TrimSpace(o); trimmed != "" { + origins = append(origins, trimmed) + } + } + + if len(origins) == 0 { + return nil + } + + return origins +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/models/api.go b/vendor/github.com/docker/model-runner/pkg/inference/models/api.go index 68be185d..a21864dd 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/models/api.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/models/api.go @@ -88,7 +88,7 @@ type Model struct { // ID is the globally unique model identifier. ID string `json:"id"` // Tags are the list of tags associated with the model. - Tags []string `json:"tags"` + Tags []string `json:"tags,omitempty"` // Created is the Unix epoch timestamp corresponding to the model creation. Created int64 `json:"created"` // Config describes the model. diff --git a/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go b/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go index b62c120e..c87efa83 100644 --- a/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go +++ b/vendor/github.com/docker/model-runner/pkg/inference/models/manager.go @@ -10,10 +10,12 @@ import ( "path" "strconv" "strings" + "sync" "github.com/docker/model-distribution/distribution" "github.com/docker/model-distribution/registry" "github.com/docker/model-distribution/types" + "github.com/docker/model-runner/pkg/diskusage" "github.com/docker/model-runner/pkg/inference" "github.com/docker/model-runner/pkg/logging" "github.com/sirupsen/logrus" @@ -36,6 +38,10 @@ type Manager struct { router *http.ServeMux // distributionClient is the client for model distribution. distributionClient *distribution.Client + // registryClient is the client for model registry. + registryClient *registry.Client + // lock is used to synchronize access to the models manager's router. + lock sync.Mutex } type ClientConfig struct { @@ -50,7 +56,7 @@ type ClientConfig struct { } // NewManager creates a new model's manager. -func NewManager(log logging.Logger, c ClientConfig) *Manager { +func NewManager(log logging.Logger, c ClientConfig, allowedOrigins []string) *Manager { // Create the model distribution client. distributionClient, err := distribution.NewClient( distribution.WithStoreRootPath(c.StoreRootPath), @@ -64,12 +70,19 @@ func NewManager(log logging.Logger, c ClientConfig) *Manager { // respond to requests, but may return errors if the client is required. } + // Create the model registry client. + registryClient := registry.NewClient( + registry.WithTransport(c.Transport), + registry.WithUserAgent(c.UserAgent), + ) + // Create the manager. m := &Manager{ log: log, pullTokens: make(chan struct{}, maximumConcurrentModelPulls), router: http.NewServeMux(), distributionClient: distributionClient, + registryClient: registryClient, } // Register routes. 
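As context for the CORS middleware introduced above, a handler can be wrapped roughly as follows (a minimal sketch; the mux contents, listen address, and origin list are illustrative and not part of this diff):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/docker/model-runner/pkg/inference"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("GET /ping", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "pong")
	})
	// An explicit allow-list; passing nil instead falls back to the
	// DMR_ORIGINS environment variable (see getAllowedOrigins above).
	handler := inference.CorsMiddleware([]string{"http://localhost:3000"}, mux)
	log.Fatal(http.ListenAndServe("localhost:8080", handler))
}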
@@ -77,7 +90,7 @@ func NewManager(log logging.Logger, c ClientConfig) *Manager { http.Error(w, "not found", http.StatusNotFound) }) - for route, handler := range m.routeHandlers() { + for route, handler := range m.routeHandlers(allowedOrigins) { m.router.HandleFunc(route, handler) } @@ -90,8 +103,22 @@ func NewManager(log logging.Logger, c ClientConfig) *Manager { return m } -func (m *Manager) routeHandlers() map[string]http.HandlerFunc { - return map[string]http.HandlerFunc{ +func (m *Manager) RebuildRoutes(allowedOrigins []string) { + m.lock.Lock() + defer m.lock.Unlock() + // Clear existing routes and re-register them. + m.router = http.NewServeMux() + // Register routes. + m.router.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { + http.Error(w, "not found", http.StatusNotFound) + }) + for route, handler := range m.routeHandlers(allowedOrigins) { + m.router.HandleFunc(route, handler) + } +} + +func (m *Manager) routeHandlers(allowedOrigins []string) map[string]http.HandlerFunc { + handlers := map[string]http.HandlerFunc{ "POST " + inference.ModelsPrefix + "/create": m.handleCreateModel, "GET " + inference.ModelsPrefix: m.handleGetModels, "GET " + inference.ModelsPrefix + "/{name...}": m.handleGetModel, @@ -102,10 +129,16 @@ func (m *Manager) routeHandlers() map[string]http.HandlerFunc { "GET " + inference.InferencePrefix + "/v1/models": m.handleOpenAIGetModels, "GET " + inference.InferencePrefix + "/v1/models/{name...}": m.handleOpenAIGetModel, } + for route, handler := range handlers { + if strings.HasPrefix(route, "GET ") { + handlers[route] = inference.CorsMiddleware(allowedOrigins, handler).ServeHTTP + } + } + return handlers } func (m *Manager) GetRoutes() []string { - routeHandlers := m.routeHandlers() + routeHandlers := m.routeHandlers(nil) routes := make([]string, 0, len(routeHandlers)) for route := range routeHandlers { routes = append(routes, route) @@ -129,7 +162,7 @@ func (m *Manager) handleCreateModel(w http.ResponseWriter, r *http.Request) { // Pull the model. In the future, we may support additional operations here // besides pulling (such as model building). - if err := m.PullModel(r.Context(), request.From, w); err != nil { + if err := m.PullModel(request.From, r, w); err != nil { if errors.Is(err, registry.ErrInvalidReference) { m.log.Warnf("Invalid model reference %q: %v", request.From, err) http.Error(w, "Invalid model reference", http.StatusBadRequest) @@ -182,24 +215,36 @@ func (m *Manager) handleGetModels(w http.ResponseWriter, r *http.Request) { // handleGetModel handles GET /models/{name} requests. func (m *Manager) handleGetModel(w http.ResponseWriter, r *http.Request) { - if m.distributionClient == nil { - http.Error(w, "model distribution service unavailable", http.StatusServiceUnavailable) + // Parse remote query parameter + remote := false + if r.URL.Query().Has("remote") { + if val, err := strconv.ParseBool(r.URL.Query().Get("remote")); err != nil { + m.log.Warnln("Error while parsing remote query parameter:", err) + } else { + remote = val + } + } + + if remote && m.registryClient == nil { + http.Error(w, "registry client unavailable", http.StatusServiceUnavailable) return } - // Query the model. 
- model, err := m.GetModel(r.PathValue("name")) + var apiModel *Model + var err error + + if remote { + apiModel, err = getRemoteModel(r.Context(), m, r.PathValue("name")) + } else { + apiModel, err = getLocalModel(m, r.PathValue("name")) + } + if err != nil { - if errors.Is(err, distribution.ErrModelNotFound) { + if errors.Is(err, distribution.ErrModelNotFound) || errors.Is(err, registry.ErrModelNotFound) { http.Error(w, err.Error(), http.StatusNotFound) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) + return } - return - } - apiModel, err := ToModel(model) - if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -211,6 +256,56 @@ func (m *Manager) handleGetModel(w http.ResponseWriter, r *http.Request) { } } +func getLocalModel(m *Manager, name string) (*Model, error) { + if m.distributionClient == nil { + return nil, errors.New("model distribution service unavailable") + } + + // Query the model. + model, err := m.GetModel(name) + if err != nil { + return nil, err + } + + return ToModel(model) +} + +func getRemoteModel(ctx context.Context, m *Manager, name string) (*Model, error) { + if m.registryClient == nil { + return nil, errors.New("registry client unavailable") + } + + m.log.Infoln("Getting remote model:", name) + model, err := m.registryClient.Model(ctx, name) + if err != nil { + return nil, err + } + + id, err := model.ID() + if err != nil { + return nil, err + } + + descriptor, err := model.Descriptor() + if err != nil { + return nil, err + } + + config, err := model.Config() + if err != nil { + return nil, err + } + + apiModel := &Model{ + ID: id, + Tags: nil, + Created: descriptor.Created.Unix(), + Config: config, + } + + return apiModel, nil +} + // handleDeleteModel handles DELETE /models/{name} requests. // query params: // - force: if true, delete the model even if it has multiple tags @@ -378,7 +473,7 @@ func (m *Manager) handlePushModel(w http.ResponseWriter, r *http.Request, model } // Call the PushModel method on the distribution client. - if err := m.PushModel(r.Context(), model, w); err != nil { + if err := m.PushModel(model, r, w); err != nil { if errors.Is(err, distribution.ErrInvalidReference) { m.log.Warnf("Invalid model reference %q: %v", model, err) http.Error(w, "Invalid model reference", http.StatusBadRequest) @@ -399,8 +494,25 @@ func (m *Manager) handlePushModel(w http.ResponseWriter, r *http.Request, model } } +// GetDiskUsage returns the disk usage of the model store. +func (m *Manager) GetDiskUsage() (int64, error, int) { + if m.distributionClient == nil { + return 0, errors.New("model distribution service unavailable"), http.StatusServiceUnavailable + } + + storePath := m.distributionClient.GetStorePath() + size, err := diskusage.Size(storePath) + if err != nil { + return 0, fmt.Errorf("error while getting store size: %v", err), http.StatusInternalServerError + } + + return size, nil, http.StatusOK +} + // ServeHTTP implement net/http.Handler.ServeHTTP. func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) { + m.lock.Lock() + defer m.lock.Unlock() m.router.ServeHTTP(w, r) } @@ -428,11 +540,11 @@ func (m *Manager) GetModelPath(ref string) (string, error) { // PullModel pulls a model to local storage. Any error it returns is suitable // for writing back to the client. -func (m *Manager) PullModel(ctx context.Context, model string, w http.ResponseWriter) error { +func (m *Manager) PullModel(model string, r *http.Request, w http.ResponseWriter) error { // Restrict model pull concurrency. 
select { case <-m.pullTokens: - case <-ctx.Done(): + case <-r.Context().Done(): return context.Canceled } defer func() { @@ -440,11 +552,21 @@ func (m *Manager) PullModel(ctx context.Context, model string, w http.ResponseWr }() // Set up response headers for streaming - w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") w.Header().Set("Transfer-Encoding", "chunked") + // Check Accept header to determine content type + acceptHeader := r.Header.Get("Accept") + isJSON := acceptHeader == "application/json" + + if isJSON { + w.Header().Set("Content-Type", "application/json") + } else { + // Defaults to text/plain + w.Header().Set("Content-Type", "text/plain") + } + // Create a flusher to ensure chunks are sent immediately flusher, ok := w.(http.Flusher) if !ok { @@ -455,11 +577,12 @@ func (m *Manager) PullModel(ctx context.Context, model string, w http.ResponseWr progressWriter := &progressResponseWriter{ writer: w, flusher: flusher, + isJSON: isJSON, } // Pull the model using the Docker model distribution client m.log.Infoln("Pulling model:", model) - err := m.distributionClient.PullModel(ctx, model, progressWriter) + err := m.distributionClient.PullModel(r.Context(), model, progressWriter) if err != nil { return fmt.Errorf("error while pulling model: %w", err) } @@ -468,13 +591,22 @@ func (m *Manager) PullModel(ctx context.Context, model string, w http.ResponseWr } // PushModel pushes a model from the store to the registry. -func (m *Manager) PushModel(ctx context.Context, model string, w http.ResponseWriter) error { +func (m *Manager) PushModel(model string, r *http.Request, w http.ResponseWriter) error { // Set up response headers for streaming - w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") w.Header().Set("Transfer-Encoding", "chunked") + // Check Accept header to determine content type + acceptHeader := r.Header.Get("Accept") + isJSON := acceptHeader == "application/json" + + if isJSON { + w.Header().Set("Content-Type", "application/json") + } else { + w.Header().Set("Content-Type", "text/plain") + } + // Create a flusher to ensure chunks are sent immediately flusher, ok := w.(http.Flusher) if !ok { @@ -485,11 +617,12 @@ func (m *Manager) PushModel(ctx context.Context, model string, w http.ResponseWr progressWriter := &progressResponseWriter{ writer: w, flusher: flusher, + isJSON: isJSON, } // Pull the model using the Docker model distribution client m.log.Infoln("Pushing model:", model) - err := m.distributionClient.PushModel(ctx, model, progressWriter) + err := m.distributionClient.PushModel(r.Context(), model, progressWriter) if err != nil { return fmt.Errorf("error while pushing model: %w", err) } @@ -501,11 +634,21 @@ func (m *Manager) PushModel(ctx context.Context, model string, w http.ResponseWr type progressResponseWriter struct { writer http.ResponseWriter flusher http.Flusher + isJSON bool } func (w *progressResponseWriter) Write(p []byte) (n int, err error) { - escapedData := html.EscapeString(string(p)) - n, err = w.writer.Write([]byte(escapedData)) + var data []byte + if w.isJSON { + // For JSON, write the raw bytes without escaping + data = p + } else { + // For plain text, escape HTML + escapedData := html.EscapeString(string(p)) + data = []byte(escapedData) + } + + n, err = w.writer.Write(data) if err != nil { return 0, err } diff --git 
a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/api.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/api.go
new file mode 100644
index 00000000..ebd40571
--- /dev/null
+++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/api.go
@@ -0,0 +1,82 @@
+package scheduling
+
+import (
+	"strings"
+	"time"
+
+	"github.com/docker/model-runner/pkg/inference"
+)
+
+const (
+	// maximumOpenAIInferenceRequestSize is the maximum OpenAI API embedding or
+	// completion request size that Scheduler will allow. This should be large
+	// enough to encompass any real-world request but also small enough to avoid
+	// DoS attacks.
+	maximumOpenAIInferenceRequestSize = 10 * 1024 * 1024
+)
+
+// trimRequestPathToOpenAIRoot trims a request path to start at the first
+// instance of /v1/ to appear in the path.
+func trimRequestPathToOpenAIRoot(path string) string {
+	index := strings.Index(path, "/v1/")
+	if index == -1 {
+		return path
+	}
+	return path[index:]
+}
+
+// backendModeForRequest determines the backend operation mode to handle an
+// OpenAI inference request. Its second return value is true if and only if a
+// valid mode could be determined.
+func backendModeForRequest(path string) (inference.BackendMode, bool) {
+	if strings.HasSuffix(path, "/v1/chat/completions") || strings.HasSuffix(path, "/v1/completions") {
+		return inference.BackendModeCompletion, true
+	} else if strings.HasSuffix(path, "/v1/embeddings") {
+		return inference.BackendModeEmbedding, true
+	}
+	return inference.BackendMode(0), false
+}
+
+// OpenAIInferenceRequest is used to extract the model specification from either
+// a chat completion or embedding request in the OpenAI API.
+type OpenAIInferenceRequest struct {
+	// Model is the requested model name.
+	Model string `json:"model"`
+}
+
+// BackendStatus represents information about a running backend.
+type BackendStatus struct {
+	// BackendName is the name of the backend.
+	BackendName string `json:"backend_name"`
+	// ModelName is the name of the model loaded in the backend.
+	ModelName string `json:"model_name"`
+	// Mode is the mode the backend is operating in.
+	Mode string `json:"mode"`
+	// LastUsed represents when this (backend, model, mode) tuple was last used.
+	LastUsed time.Time `json:"last_used,omitempty"`
+}
+
+// DiskUsage represents the disk usage of the models and default backend.
+type DiskUsage struct {
+	ModelsDiskUsage         int64 `json:"models_disk_usage"`
+	DefaultBackendDiskUsage int64 `json:"default_backend_disk_usage"`
+}
+
+// UnloadRequest is used to specify which models to unload.
+type UnloadRequest struct {
+	All     bool     `json:"all"`
+	Backend string   `json:"backend"`
+	Models  []string `json:"models"`
+}
+
+// UnloadResponse is used to return the number of unloaded runners (backend, model).
+type UnloadResponse struct {
+	UnloadedRunners int `json:"unloaded_runners"`
+}
+
+// ConfigureRequest specifies per-model runtime configuration options.
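Note the kebab-case JSON keys on the struct that follows; a quick sketch of the wire payload the CLI posts to the `_configure` endpoint, using a local mirror of the type with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of scheduling.ConfigureRequest, reproduced here for illustration.
type configureRequest struct {
	Model           string `json:"model"`
	ContextSize     int64  `json:"context-size,omitempty"`
	RawRuntimeFlags string `json:"raw-runtime-flags,omitempty"`
}

func main() {
	payload, _ := json.Marshal(configureRequest{
		Model:           "ai/smollm2", // illustrative model reference
		ContextSize:     8192,
		RawRuntimeFlags: "--temp 0.2 --top-k 40",
	})
	fmt.Println(string(payload))
	// {"model":"ai/smollm2","context-size":8192,"raw-runtime-flags":"--temp 0.2 --top-k 40"}
}
```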
+type ConfigureRequest struct { + Model string `json:"model"` + ContextSize int64 `json:"context-size,omitempty"` + RawRuntimeFlags string `json:"raw-runtime-flags,omitempty"` +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/errors.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/errors.go new file mode 100644 index 00000000..aca6fee2 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/errors.go @@ -0,0 +1,10 @@ +package scheduling + +import ( + "errors" +) + +// ErrBackendNotFound indicates that an unknown backend was requested. If +// returned in conjunction with an HTTP request, it should be paired with a +// 404 response status. +var ErrBackendNotFound = errors.New("backend not found") diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/installer.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/installer.go new file mode 100644 index 00000000..ae3aba83 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/installer.go @@ -0,0 +1,140 @@ +package scheduling + +import ( + "context" + "errors" + "net/http" + "sync/atomic" + + "github.com/docker/model-runner/pkg/inference" + "github.com/docker/model-runner/pkg/logging" +) + +var ( + // errInstallerNotStarted indicates that the installer has not yet been + // started and thus installation waits are not possible. + errInstallerNotStarted = errors.New("backend installer not started") + // errInstallerShuttingDown indicates that the installer's run loop has been + // terminated and the installer is shutting down. + errInstallerShuttingDown = errors.New("backend installer shutting down") +) + +// installStatus tracks the installation status of a backend. +type installStatus struct { + // installed is closed if and when the corresponding backend's installation + // completes successfully. + installed chan struct{} + // failed is closed if the corresponding backend's installation fails. If + // this channel is closed, then err can be read and returned. + failed chan struct{} + // err is the error that occurred during installation. It should only be + // accessed by readers if (and after) failed is closed. + err error +} + +// installer drives backend installations. +type installer struct { + // log is the associated logger. + log logging.Logger + // backends are the supported inference backends. + backends map[string]inference.Backend + // httpClient is the HTTP client to use for backend installations. + httpClient *http.Client + // started tracks whether or not the installer has been started. + started atomic.Bool + // statuses maps backend names to their installation statuses. + statuses map[string]*installStatus +} + +// newInstaller creates a new backend installer. +func newInstaller( + log logging.Logger, + backends map[string]inference.Backend, + httpClient *http.Client, +) *installer { + // Create status trackers. + statuses := make(map[string]*installStatus, len(backends)) + for name := range backends { + statuses[name] = &installStatus{ + installed: make(chan struct{}), + failed: make(chan struct{}), + } + } + + // Create the installer. + return &installer{ + log: log, + backends: backends, + httpClient: httpClient, + statuses: statuses, + } +} + +// run is the main run loop for the installer. +func (i *installer) run(ctx context.Context) { + // Mark the installer as having started. + i.started.Store(true) + + // Attempt to install each backend and update statuses. 
+	//
+	// TODO: We may want to add a backoff + retry mechanism.
+	//
+	// TODO: We currently try to install all known backends. We may wish to add
+	// granular, per-backend settings. For now, with llama.cpp as our only
+	// ubiquitous backend and mlx as a relatively lightweight backend (on macOS
+	// only), this granularity is probably less of a concern.
+	for name, backend := range i.backends {
+		status := i.statuses[name]
+
+		var installedClosed bool
+		select {
+		case <-status.installed:
+			installedClosed = true
+		default:
+			installedClosed = false
+		}
+
+		if (status.err != nil && !errors.Is(status.err, context.Canceled)) || installedClosed {
+			continue
+		}
+		if err := backend.Install(ctx, i.httpClient); err != nil {
+			i.log.Warnf("Backend installation failed for %s: %v", name, err)
+			select {
+			case <-ctx.Done():
+				status.err = errors.Join(errInstallerShuttingDown, ctx.Err())
+				continue
+			default:
+				status.err = err
+			}
+			close(status.failed)
+		} else {
+			close(status.installed)
+		}
+	}
+}
+
+// wait waits for installation of the specified backend to complete or fail.
+func (i *installer) wait(ctx context.Context, backend string) error {
+	// Grab the backend status.
+	status, ok := i.statuses[backend]
+	if !ok {
+		return ErrBackendNotFound
+	}
+
+	// If the installer hasn't started, then don't poll for readiness, because
+	// it may never come. If it has started, then even if it's cancelled we can
+	// be sure that we'll at least see failure for all backend installations.
+	if !i.started.Load() {
+		return errInstallerNotStarted
+	}
+
+	// Wait for readiness.
+	select {
+	case <-ctx.Done():
+		return context.Canceled
+	case <-status.installed:
+		return nil
+	case <-status.failed:
+		return status.err
+	}
+}
diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go
new file mode 100644
index 00000000..60c1eec4
--- /dev/null
+++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/loader.go
@@ -0,0 +1,521 @@
+package scheduling
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"runtime"
+	"time"
+
+	"github.com/docker/model-runner/pkg/inference"
+	"github.com/docker/model-runner/pkg/inference/models"
+	"github.com/docker/model-runner/pkg/logging"
+)
+
+const (
+	// maximumRunnerSlots is the maximum number of runner slots allowed.
+	// TODO: We may wish to make this a tunable option, though for the time
+	// being it is almost certainly greater than the number of models that most
+	// developers' systems will be able to load.
+	maximumRunnerSlots = 8
+	// runnerIdleTimeout is the maximum amount of time that a runner can sit
+	// idle (i.e. without any requests) before being terminated.
+	runnerIdleTimeout = 5 * time.Minute
+)
+
+var (
+	// errLoadsDisabled indicates that backend loads are currently disabled.
+	errLoadsDisabled = errors.New("backend loading disabled")
+	// errModelTooBig indicates that the model is too big to ever load into the
+	// available system memory.
+	errModelTooBig = errors.New("model too big")
+	// errRunnerAlreadyActive indicates that a given runner is already active
+	// and therefore can't, for example, be reconfigured.
+	errRunnerAlreadyActive = errors.New("runner already active")
+)
+
+// runnerKey is used to index runners.
+type runnerKey struct {
+	// backend is the backend associated with the runner.
+	backend string
+	// model is the model associated with the runner.
+	model string
+	// mode is the operation mode associated with the runner.
+	mode inference.BackendMode
+}
+
+// loader manages the loading and unloading of backend runners. It regulates
+// active backends in a manner that avoids exhausting system resources. Loaders
+// assume that all of their backends have been installed, so no load requests
+// should be made until the caller is certain that the corresponding backend has
+// been installed successfully.
+type loader struct {
+	// log is the associated logger.
+	log logging.Logger
+	// backends are the supported inference backends.
+	backends map[string]inference.Backend
+	// modelManager is the shared model manager.
+	modelManager *models.Manager
+	// totalMemory is the total system memory allocated to the loader.
+	totalMemory uint64
+	// idleCheck is used to signal the run loop when timestamps have updated.
+	idleCheck chan struct{}
+	// guard is a semaphore controlling access to all subsequent fields. It is
+	// buffered (with size 1) and contains a single element that must be held in
+	// order to operate on those fields. We use a channel (instead of a
+	// sync.Mutex) to enable polling.
+	guard chan struct{}
+	// loadsEnabled signals that loads are currently enabled.
+	loadsEnabled bool
+	// availableMemory is the available portion of the loader's total memory.
+	availableMemory uint64
+	// waiters is the set of signal channels associated with waiting loaders. We
+	// use a set of signaling channels (instead of a sync.Cond) to enable
+	// polling. Each signaling channel should be buffered (with size 1).
+	waiters map[chan<- struct{}]bool
+	// runners maps runner keys to their slot index.
+	runners map[runnerKey]int
+	// slots maps slot indices to associated runners. A slot is considered free
+	// if the runner value in it is nil.
+	slots []*runner
+	// references maps slot indices to reference counts.
+	references []uint
+	// allocations maps slot indices to memory allocation sizes.
+	allocations []uint64
+	// timestamps maps slot indices to last usage times. Values in this slice
+	// are only valid if the corresponding reference count is zero.
+	timestamps []time.Time
+	// runnerConfigs maps runner keys to runner configurations.
+	runnerConfigs map[runnerKey]inference.BackendConfiguration
+}
+
+// newLoader creates a new loader.
+func newLoader(
+	log logging.Logger,
+	backends map[string]inference.Backend,
+	modelManager *models.Manager,
+) *loader {
+	// Compute the number of runner slots to allocate. Because of RAM and VRAM
+	// limitations, it's unlikely that we'll ever be able to fully populate
+	// these slots, so for now we just choose a reasonable value. We may need to
+	// tune this heuristic for systems with enormous amounts of VRAM.
+	nSlots := min(runtime.NumCPU(), maximumRunnerSlots)
+
+	// Compute the amount of available memory.
+	//
+	// TODO: For now, we treat the system as having memory size 1 and all models
+	// as having size 1 (and thus we'll only load a single model at a time).
+	// However, the loader is designed to use "real" values for each and to
+	// schedule appropriately. Thus, we should switch to polling the system
+	// VRAM size here (and potentially even reserving a portion of it) and
+	// computing model size through estimation (using parameter count and
+	// quantization data type size).
+	totalMemory := uint64(1)
+
+	// Create the loader.
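The `guard` channel initialized in the literal below is a mutex in disguise: a one-element buffered channel gives a lock whose acquisition can lose a race against a context, which `sync.Mutex` cannot. The pattern in isolation, as a self-contained sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type guarded struct {
	guard chan struct{} // one-element buffered channel acting as a pollable mutex
}

func newGuarded() *guarded {
	g := &guarded{guard: make(chan struct{}, 1)}
	g.guard <- struct{}{} // the single token starts out available
	return g
}

// lock blocks until the token is acquired or ctx is cancelled.
func (g *guarded) lock(ctx context.Context) bool {
	select {
	case <-g.guard:
		return true
	case <-ctx.Done():
		return false
	}
}

// unlock returns the token, releasing the lock.
func (g *guarded) unlock() { g.guard <- struct{}{} }

func main() {
	g := newGuarded()
	g.lock(context.Background()) // acquire the token

	// A second acquirer with a deadline gives up instead of blocking forever.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(g.lock(ctx)) // false: the token is held and the context expires

	g.unlock()
}
```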
+	l := &loader{
+		log:             log,
+		backends:        backends,
+		modelManager:    modelManager,
+		totalMemory:     totalMemory,
+		idleCheck:       make(chan struct{}, 1),
+		guard:           make(chan struct{}, 1),
+		availableMemory: totalMemory,
+		waiters:         make(map[chan<- struct{}]bool),
+		runners:         make(map[runnerKey]int, nSlots),
+		slots:           make([]*runner, nSlots),
+		references:      make([]uint, nSlots),
+		allocations:     make([]uint64, nSlots),
+		timestamps:      make([]time.Time, nSlots),
+		runnerConfigs:   make(map[runnerKey]inference.BackendConfiguration),
+	}
+	l.guard <- struct{}{}
+	return l
+}
+
+// lock acquires the guard semaphore. It returns true if the lock was acquired
+// and false if ctx is cancelled before acquisition.
+func (l *loader) lock(ctx context.Context) bool {
+	select {
+	case <-l.guard:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+// unlock releases the guard semaphore.
+func (l *loader) unlock() {
+	l.guard <- struct{}{}
+}
+
+// broadcast signals all waiters. Callers must hold the loader lock.
+func (l *loader) broadcast() {
+	for waiter := range l.waiters {
+		select {
+		case waiter <- struct{}{}:
+		default:
+		}
+	}
+}
+
+// evict evicts all unused runners from the loader. If idleOnly is true, then
+// only those unused, but functioning, runners which are considered "idle" (based
+// on usage timestamp) are evicted. Defunct (e.g. crashed) runners will be evicted
+// regardless of whether they are considered "idle". The caller must hold the loader
+// lock. It returns the number of remaining runners.
+func (l *loader) evict(idleOnly bool) int {
+	now := time.Now()
+	for r, slot := range l.runners {
+		unused := l.references[slot] == 0
+		idle := unused && now.Sub(l.timestamps[slot]) > runnerIdleTimeout
+		defunct := false
+		select {
+		case <-l.slots[slot].done:
+			defunct = true
+		default:
+		}
+		if unused && (!idleOnly || idle || defunct) {
+			l.log.Infof("Evicting %s backend runner with model %s in %s mode",
+				r.backend, r.model, r.mode,
+			)
+			l.slots[slot].terminate()
+			l.slots[slot] = nil
+			l.availableMemory += l.allocations[slot]
+			l.allocations[slot] = 0
+			l.timestamps[slot] = time.Time{}
+			delete(l.runners, r)
+		}
+	}
+	return len(l.runners)
+}
+
+// evictRunner evicts a specific runner. The caller must hold the loader lock.
+// It returns the number of remaining runners.
+func (l *loader) evictRunner(backend, model string, mode inference.BackendMode) int {
+	allBackends := backend == ""
+	for r, slot := range l.runners {
+		unused := l.references[slot] == 0
+		if unused && (allBackends || r.backend == backend) && r.model == model && r.mode == mode {
+			l.log.Infof("Evicting %s backend runner with model %s in %s mode",
+				r.backend, r.model, r.mode,
+			)
+			l.slots[slot].terminate()
+			l.slots[slot] = nil
+			l.availableMemory += l.allocations[slot]
+			l.allocations[slot] = 0
+			l.timestamps[slot] = time.Time{}
+			delete(l.runners, r)
+		}
+	}
+	return len(l.runners)
+}
+
+// Unload unloads runners and returns the number of unloaded runners.
+func (l *loader) Unload(ctx context.Context, unload UnloadRequest) int {
+	if !l.lock(ctx) {
+		return 0
+	}
+	defer l.unlock()
+
+	return len(l.runners) - func() int {
+		if unload.All {
+			l.runnerConfigs = make(map[runnerKey]inference.BackendConfiguration)
+			return l.evict(false)
+		} else {
+			for _, model := range unload.Models {
+				delete(l.runnerConfigs, runnerKey{unload.Backend, model, inference.BackendModeCompletion})
+				// Evict both completion and embedding models. We should consider
+				// accepting a mode parameter in unload requests.
+ l.evictRunner(unload.Backend, model, inference.BackendModeCompletion) + l.evictRunner(unload.Backend, model, inference.BackendModeEmbedding) + } + return len(l.runners) + } + }() +} + +// stopAndDrainTimer stops and drains a timer without knowing if it was running. +func stopAndDrainTimer(timer *time.Timer) { + timer.Stop() + select { + case <-timer.C: + default: + } +} + +// idleCheckDuration computes the duration until the next idle runner eviction +// should occur. The caller must hold the loader lock. If no runners are unused, +// then -1 seconds is returned. If any unused runners are already expired, then +// 0 seconds is returned. Otherwise a time in the future at which eviction +// should occur is returned. +func (l *loader) idleCheckDuration() time.Duration { + // Compute the oldest usage time for any idle runner. + var oldest time.Time + for _, slot := range l.runners { + if l.references[slot] == 0 { + timestamp := l.timestamps[slot] + if oldest.IsZero() || timestamp.Before(oldest) { + oldest = timestamp + } + } + } + + // If there are no unused runners, then don't schedule a check. + if oldest.IsZero() { + return -1 * time.Second + } + + // Compute the remaining duration. If negative, check immediately, otherwise + // wait until 100 milliseconds after expiration time (to avoid checking + // right on the expiration boundary). + if remaining := runnerIdleTimeout - time.Since(oldest); remaining < 0 { + return 0 + } else { + return remaining + 100*time.Millisecond + } +} + +// run is the run loop for the loader. It drives idle runner eviction. By the +// time run returns, all runners will have been evicted. +func (l *loader) run(ctx context.Context) { + // Signal that loads are enabled. There's no need to broadcast here because + // no loaders will wait if they see that loads are disabled. + if !l.lock(ctx) { + return + } + l.loadsEnabled = true + l.unlock() + + // Defer disablement of loads and wait for complete eviction. + defer func() { + poll := make(chan struct{}, 1) + poll <- struct{}{} // Trigger an initial polling in case all are unused. + l.lock(context.Background()) + l.loadsEnabled = false + l.broadcast() + l.waiters[poll] = true + l.unlock() + for range poll { + l.lock(context.Background()) + if l.evict(false) == 0 { + delete(l.waiters, poll) + l.unlock() + break + } + l.unlock() + } + }() + + // Create a timer that we'll use to drive idle eviction. Ensure that it's + // stopped by the time we exit. + idleTimer := time.NewTimer(0) + if !idleTimer.Stop() { + <-idleTimer.C + } + defer idleTimer.Stop() + + // Evict idle runners. + for { + select { + case <-ctx.Done(): + return + case <-idleTimer.C: + // Perform eviction. + if l.lock(ctx) { + l.evict(true) + if nextCheck := l.idleCheckDuration(); nextCheck >= 0 { + idleTimer.Reset(nextCheck) + } + l.unlock() + } + case <-l.idleCheck: + // Compute the next idle check time. + if l.lock(ctx) { + stopAndDrainTimer(idleTimer) + if nextCheck := l.idleCheckDuration(); nextCheck >= 0 { + idleTimer.Reset(nextCheck) + } + l.unlock() + } + } + } +} + +// load allocates a runner using the specified backend and model. If allocated, +// it should be released by the caller using the release mechanism (once the +// runner is no longer needed). +func (l *loader) load(ctx context.Context, backendName, model string, mode inference.BackendMode) (*runner, error) { + // Grab the backend. 
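An aside on `stopAndDrainTimer` above: `time.Timer.Stop` does not drain a tick that fired before the stop took effect, so a later `Reset` can appear to expire instantly. A compact demonstration of why the drain matters:

```go
package main

import (
	"fmt"
	"time"
)

// stopAndDrainTimer mirrors the helper above: stop the timer, then discard
// any tick that fired before Stop took effect.
func stopAndDrainTimer(timer *time.Timer) {
	timer.Stop()
	select {
	case <-timer.C:
	default:
	}
}

func main() {
	timer := time.NewTimer(10 * time.Millisecond)
	time.Sleep(20 * time.Millisecond) // let the timer fire while unread

	// Without the drain, the stale tick would satisfy the next <-timer.C
	// immediately after Reset; draining makes Reset behave predictably.
	stopAndDrainTimer(timer)
	timer.Reset(30 * time.Millisecond)

	start := time.Now()
	<-timer.C
	fmt.Printf("waited %v (expected ~30ms, not 0)\n", time.Since(start).Round(time.Millisecond))
}
```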
+ backend, ok := l.backends[backendName] + if !ok { + return nil, ErrBackendNotFound + } + + // Estimate the amount of memory that will be used by the model and check + // that we're even capable of loading it. + // + // TODO: For now, we treat the system as having memory size 1 and all models + // as having size 1 (and thus we'll only load a single model at a time). + // However, the loader is designed to use "real" values for each and to + // schedule appropriately. Thus, we should switch to computing model size + // here through estimation (using parameter count and quantization data type + // size). + memory := uint64(1) + if memory > l.totalMemory { + return nil, errModelTooBig + } + + // Acquire the loader lock and defer its release. + if !l.lock(ctx) { + return nil, context.Canceled + } + defer l.unlock() + + // Create a polling channel that we can use to detect state changes and + // ensure that it's deregistered by the time we return. + poll := make(chan struct{}, 1) + l.waiters[poll] = true + defer func() { + delete(l.waiters, poll) + }() + + // Loop until we can satisfy the request or an error occurs. + for { + slot := -1 + + // If loads are disabled, then there's nothing we can do. + if !l.loadsEnabled { + return nil, errLoadsDisabled + } + + // See if we can satisfy the request with an existing runner. + existing, ok := l.runners[runnerKey{backendName, model, mode}] + if ok { + select { + case <-l.slots[existing].done: + l.log.Warnf("%s runner for %s is defunct. Waiting for it to be evicted.", backendName, model) + goto WaitForChange + default: + l.references[existing] += 1 + l.timestamps[existing] = time.Time{} + return l.slots[existing], nil + } + } + + // If there's not sufficient memory or all slots are full, then try + // evicting unused runners. + if memory > l.availableMemory || len(l.runners) == len(l.slots) { + l.evict(false) + } + + // If there's sufficient memory and a free slot, then find the slot. + if memory <= l.availableMemory && len(l.runners) < len(l.slots) { + for s, runner := range l.slots { + if runner == nil { + slot = s + break + } + } + } + + // If we've identified a slot, then we're ready to start a runner. + if slot >= 0 { + var runnerConfig *inference.BackendConfiguration + if rc, ok := l.runnerConfigs[runnerKey{backendName, model, mode}]; ok { + runnerConfig = &rc + } + // Create the runner. + l.log.Infof("Loading %s backend runner with model %s in %s mode", backendName, model, mode) + runner, err := run(l.log, backend, model, mode, slot, runnerConfig) + if err != nil { + l.log.Warnf("Unable to start %s backend runner with model %s in %s mode: %v", + backendName, model, mode, err, + ) + return nil, fmt.Errorf("unable to start runner: %w", err) + } + + // Wait for the runner to be ready. In theory it's a little + // inefficient to block all other loaders (including those that + // might not want this runner), but in reality they would probably + // be blocked by the underlying loading anyway (in terms of disk and + // GPU performance). We have to retain a lock here though to enforce + // deduplication of runners and keep slot / memory reservations. + if err := runner.wait(ctx); err != nil { + runner.terminate() + l.log.Warnf("Initialization for %s backend runner with model %s in %s mode failed: %v", + backendName, model, mode, err, + ) + return nil, fmt.Errorf("error waiting for runner to be ready: %w", err) + } + + // Perform registration and return the runner. 
+ l.availableMemory -= memory + l.runners[runnerKey{backendName, model, mode}] = slot + l.slots[slot] = runner + l.references[slot] = 1 + l.allocations[slot] = memory + return runner, nil + } + + // Wait for something to change. Note that we always re-lock with + // context.Background() because we need to ensure we hold the lock by + // the time we return. + WaitForChange: + l.unlock() + select { + case <-ctx.Done(): + l.lock(context.Background()) + return nil, context.Canceled + case <-poll: + l.lock(context.Background()) + } + } +} + +// release releases a runner, which internally decrements its reference count. +func (l *loader) release(runner *runner) { + // Acquire the loader lock and defer its release. + l.lock(context.Background()) + defer l.unlock() + + // Determine the runner's slot. + slot := l.runners[runnerKey{runner.backend.Name(), runner.model, runner.mode}] + + // Decrement the runner's reference count. + l.references[slot] -= 1 + + // If the runner's reference count is now zero, then check if it is still + // active, and record now as its idle start time and signal the idle + // checker. + if l.references[slot] == 0 { + select { + case <-runner.done: + l.evictRunner(runner.backend.Name(), runner.model, runner.mode) + default: + l.timestamps[slot] = time.Now() + select { + case l.idleCheck <- struct{}{}: + default: + } + } + } + + // Signal waiters. + l.broadcast() +} + +func (l *loader) setRunnerConfig(ctx context.Context, backendName, model string, mode inference.BackendMode, runnerConfig inference.BackendConfiguration) error { + l.lock(ctx) + defer l.unlock() + + runnerId := runnerKey{backendName, model, mode} + + if _, ok := l.runners[runnerId]; ok { + return errRunnerAlreadyActive + } + + l.log.Infof("Configuring %s runner for %s", backendName, model) + l.runnerConfigs[runnerId] = runnerConfig + return nil +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go new file mode 100644 index 00000000..43f28e48 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/runner.go @@ -0,0 +1,232 @@ +package scheduling + +import ( + "context" + "errors" + "fmt" + "io" + logpkg "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/docker/model-runner/pkg/inference" + "github.com/docker/model-runner/pkg/logging" +) + +const ( + // maximumReadinessPings is the maximum number of retries that a runner will + // perform when pinging a backend for readiness. + maximumReadinessPings = 600 + // readinessRetryInterval is the interval at which a runner will retry + // readiness checks for a backend. + readinessRetryInterval = 500 * time.Millisecond +) + +// errBackendNotReadyInTime indicates that an inference backend took too +// long to initialize and respond to a readiness request. +var errBackendNotReadyInTime = errors.New("inference backend took too long to initialize") + +// errBackendQuitUnexpectedly indicates that an inference backend terminated +// unexpectedly +var errBackendQuitUnexpectedly = errors.New("inference backend quit unexpectedly") + +// RunnerSocketPath determines the Unix domain socket path used to communicate +// with runners at the specified slot. It can be overridden during init(). 
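One hypothetical override of that hook, relocating sockets to the user cache directory; the directory layout is an assumption for illustration, not part of this change:

```go
package scheduling_example

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/model-runner/pkg/inference/scheduling"
)

func init() {
	// Hypothetical override: place runner sockets under the user cache
	// directory instead of the process working directory.
	scheduling.RunnerSocketPath = func(slot int) (string, error) {
		dir, err := os.UserCacheDir()
		if err != nil {
			return "", err
		}
		dir = filepath.Join(dir, "model-runner")
		if err := os.MkdirAll(dir, 0o755); err != nil {
			return "", err
		}
		return filepath.Join(dir, fmt.Sprintf("inference-runner-%d.sock", slot)), nil
	}
}
```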
+var RunnerSocketPath = func(slot int) (string, error) { + return fmt.Sprintf("inference-runner-%d.sock", slot), nil +} + +// runner executes a given backend with a given model and provides reverse +// proxying to that backend. +type runner struct { + // log is the component logger. + log logging.Logger + // backend is the associated backend. + backend inference.Backend + // model is the associated model. + model string + // mode is the backend operation mode. + mode inference.BackendMode + // cancel terminates the runner's backend run loop. + cancel context.CancelFunc + // done is closed when the runner's backend run loop exits. + done <-chan struct{} + // transport is a transport targeting the runner's socket. + transport *http.Transport + // client is a client targeting the runner's HTTP server. + client *http.Client + // proxy is a reverse proxy targeting the runner's HTTP server. + proxy *httputil.ReverseProxy + // proxyLog is the stream used for logging by proxy. + proxyLog io.Closer + // err is the error returned by the runner's backend, only valid after done is closed. + err error +} + +// run creates a new runner instance. +func run( + log logging.Logger, + backend inference.Backend, + model string, + mode inference.BackendMode, + slot int, + runnerConfig *inference.BackendConfiguration, +) (*runner, error) { + // Create a dialer / transport that target backend on the specified slot. + socket, err := RunnerSocketPath(slot) + if err != nil { + return nil, fmt.Errorf("unable to determine runner socket path: %w", err) + } + dialer := &net.Dialer{} + transport := &http.Transport{ + DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) { + return dialer.DialContext(ctx, "unix", socket) + }, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + + // Create a client that we can use internally to ping the backend. + client := &http.Client{Transport: transport} + + // Create a reverse proxy targeting the backend. The virtual URL that we use + // here is merely a placeholder; the transport always dials the backend HTTP + // endpoint and the hostname is always overwritten in the proxy. This URL is + // not accessible from anywhere. + upstream, err := url.Parse("http://inference.docker.internal") + if err != nil { + return nil, fmt.Errorf("unable to parse virtual backend URL: %w", err) + } + proxy := httputil.NewSingleHostReverseProxy(upstream) + standardDirector := proxy.Director + proxy.Director = func(r *http.Request) { + standardDirector(r) + // HACK: Most backends will be happier with a "localhost" hostname than + // an "inference.docker.internal" hostname (which they may reject). + r.Host = "localhost" + // Remove the prefix up to the OpenAI API root. + r.URL.Path = trimRequestPathToOpenAIRoot(r.URL.Path) + r.URL.RawPath = trimRequestPathToOpenAIRoot(r.URL.RawPath) + } + proxy.Transport = transport + proxyLog := log.Writer() + proxy.ErrorLog = logpkg.New(proxyLog, "", 0) + + // Create a cancellable context to regulate the runner's backend run loop + // and a channel to track its termination. 
+	runCtx, runCancel := context.WithCancel(context.Background())
+	runDone := make(chan struct{})
+
+	r := &runner{
+		log:       log,
+		backend:   backend,
+		model:     model,
+		mode:      mode,
+		cancel:    runCancel,
+		done:      runDone,
+		transport: transport,
+		client:    client,
+		proxy:     proxy,
+		proxyLog:  proxyLog,
+	}
+
+	proxy.ErrorHandler = func(w http.ResponseWriter, req *http.Request, err error) {
+		// If the error is EOF, the underlying runner likely bailed and closed its
+		// socket unexpectedly. Wait for the runner process to complete, but time
+		// out in case the runner process only killed its comms and is stuck.
+		if errors.Is(err, io.EOF) {
+			w.WriteHeader(http.StatusInternalServerError)
+			select {
+			case <-r.done:
+				return
+			case <-time.After(30 * time.Second):
+			}
+		} else {
+			w.WriteHeader(http.StatusBadGateway)
+		}
+	}
+
+	// Start the backend run loop.
+	go func() {
+		if err := backend.Run(runCtx, socket, model, mode, runnerConfig); err != nil {
+			log.Warnf("Backend %s running model %s exited with error: %v",
+				backend.Name(), model, err,
+			)
+			r.err = err
+		}
+		close(runDone)
+	}()
+
+	// Return the runner.
+	return r, nil
+}
+
+// wait waits for the runner to be ready.
+func (r *runner) wait(ctx context.Context) error {
+	// Loop and poll for readiness.
+	for p := 0; p < maximumReadinessPings; p++ {
+		select {
+		case <-r.done:
+			if r.err == nil {
+				return errBackendQuitUnexpectedly
+			}
+			return r.err
+		default:
+		}
+		// Create and execute a request targeting a known-valid endpoint.
+		readyRequest, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost/v1/models", http.NoBody)
+		if err != nil {
+			return fmt.Errorf("readiness request creation failed: %w", err)
+		}
+		response, err := r.client.Do(readyRequest)
+		if err == nil {
+			response.Body.Close()
+		}
+
+		// If the request failed, then wait (if appropriate) and try again.
+		if err != nil || response.StatusCode != http.StatusOK {
+			if p < (maximumReadinessPings - 1) {
+				select {
+				case <-time.After(readinessRetryInterval):
+					continue
+				case <-ctx.Done():
+					return context.Canceled
+				}
+			}
+			break
+		}
+
+		// The backend responded successfully.
+		return nil
+	}
+
+	// The backend did not initialize and respond in time.
+	return errBackendNotReadyInTime
+}
+
+// terminate stops the runner instance and waits for it to unload from memory.
+func (r *runner) terminate() {
+	// Signal termination and wait for the run loop to exit.
+	r.cancel()
+	<-r.done
+
+	// Close any idle connections.
+	r.client.CloseIdleConnections()
+	r.transport.CloseIdleConnections()
+
+	// Close the proxy's log.
+	if err := r.proxyLog.Close(); err != nil {
+		r.log.Warnf("Unable to close reverse proxy log writer: %v", err)
+	}
+}
+
+// ServeHTTP implements net/http.Handler.ServeHTTP. It forwards requests to the
+// backend's HTTP server.
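Before the one-line `ServeHTTP` below hands off to the proxy, the dial-a-socket pattern it relies on is worth seeing in miniature: the upstream URL is a pure placeholder, and only the transport decides where bytes go. A self-contained sketch with an assumed socket path:

```go
package main

import (
	"context"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Hypothetical socket path; the scheduler derives one per runner slot.
	const socket = "/tmp/inference-runner-0.sock"

	// Every connection dials the Unix socket, whatever host the URL names.
	transport := &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", socket)
		},
	}

	// The upstream URL is never resolved; it only seeds the reverse proxy.
	upstream, _ := url.Parse("http://inference.docker.internal")
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	director := proxy.Director
	proxy.Director = func(r *http.Request) {
		director(r)
		r.Host = "localhost" // backends tend to reject unfamiliar hostnames
	}
	proxy.Transport = transport

	http.ListenAndServe("127.0.0.1:8080", proxy)
}
```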
+func (r *runner) ServeHTTP(w http.ResponseWriter, req *http.Request) { + r.proxy.ServeHTTP(w, req) +} diff --git a/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go new file mode 100644 index 00000000..8fbe721d --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/inference/scheduling/scheduler.go @@ -0,0 +1,413 @@ +package scheduling + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/docker/model-distribution/distribution" + "github.com/docker/model-runner/pkg/inference" + "github.com/docker/model-runner/pkg/inference/models" + "github.com/docker/model-runner/pkg/logging" + "github.com/docker/model-runner/pkg/metrics" + "github.com/mattn/go-shellwords" + "golang.org/x/sync/errgroup" +) + +// Scheduler is used to coordinate inference scheduling across multiple backends +// and models. +type Scheduler struct { + // log is the associated logger. + log logging.Logger + // backends are the supported inference backends. + backends map[string]inference.Backend + // defaultBackend is the default inference backend. It may be nil. + defaultBackend inference.Backend + // modelManager is the shared model manager. + modelManager *models.Manager + // installer is the backend installer. + installer *installer + // loader is the backend loader. + loader *loader + // router is the HTTP request router. + router *http.ServeMux + // tracker is the metrics tracker. + tracker *metrics.Tracker + // lock is used to synchronize access to the scheduler's router. + lock sync.Mutex +} + +// NewScheduler creates a new inference scheduler. +func NewScheduler( + log logging.Logger, + backends map[string]inference.Backend, + defaultBackend inference.Backend, + modelManager *models.Manager, + httpClient *http.Client, + allowedOrigins []string, + tracker *metrics.Tracker, +) *Scheduler { + // Create the scheduler. + s := &Scheduler{ + log: log, + backends: backends, + defaultBackend: defaultBackend, + modelManager: modelManager, + installer: newInstaller(log, backends, httpClient), + loader: newLoader(log, backends, modelManager), + router: http.NewServeMux(), + tracker: tracker, + } + + // Register routes. + s.router.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { + http.Error(w, "not found", http.StatusNotFound) + }) + + for route, handler := range s.routeHandlers(allowedOrigins) { + s.router.HandleFunc(route, handler) + } + + // Scheduler successfully initialized. + return s +} + +func (s *Scheduler) RebuildRoutes(allowedOrigins []string) { + s.lock.Lock() + defer s.lock.Unlock() + // Clear existing routes and re-register them. + s.router = http.NewServeMux() + // Register routes. 
+ s.router.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { + http.Error(w, "not found", http.StatusNotFound) + }) + for route, handler := range s.routeHandlers(allowedOrigins) { + s.router.HandleFunc(route, handler) + } +} + +func (s *Scheduler) routeHandlers(allowedOrigins []string) map[string]http.HandlerFunc { + openAIRoutes := []string{ + "POST " + inference.InferencePrefix + "/{backend}/v1/chat/completions", + "POST " + inference.InferencePrefix + "/{backend}/v1/completions", + "POST " + inference.InferencePrefix + "/{backend}/v1/embeddings", + "POST " + inference.InferencePrefix + "/v1/chat/completions", + "POST " + inference.InferencePrefix + "/v1/completions", + "POST " + inference.InferencePrefix + "/v1/embeddings", + } + m := make(map[string]http.HandlerFunc) + for _, route := range openAIRoutes { + m[route] = inference.CorsMiddleware(allowedOrigins, http.HandlerFunc(s.handleOpenAIInference)).ServeHTTP + // Register OPTIONS for CORS preflight. + optionsRoute := "OPTIONS " + route[strings.Index(route, " "):] + m[optionsRoute] = inference.CorsMiddleware(allowedOrigins, http.HandlerFunc(s.handleOpenAIInference)).ServeHTTP + } + m["GET "+inference.InferencePrefix+"/status"] = s.GetBackendStatus + m["GET "+inference.InferencePrefix+"/ps"] = s.GetRunningBackends + m["GET "+inference.InferencePrefix+"/df"] = s.GetDiskUsage + m["POST "+inference.InferencePrefix+"/unload"] = s.Unload + m["POST "+inference.InferencePrefix+"/{backend}/_configure"] = s.Configure + m["POST "+inference.InferencePrefix+"/_configure"] = s.Configure + return m +} + +func (s *Scheduler) GetRoutes() []string { + routeHandlers := s.routeHandlers(nil) + routes := make([]string, 0, len(routeHandlers)) + for route := range routeHandlers { + routes = append(routes, route) + } + return routes +} + +// Run is the scheduler's main run loop. By the time it returns, all inference +// backends will have been unloaded from memory. +func (s *Scheduler) Run(ctx context.Context) error { + // Create an error group to track worker Goroutines. + workers, workerCtx := errgroup.WithContext(ctx) + + // Start the installer. + workers.Go(func() error { + s.installer.run(workerCtx) + return nil + }) + + // Start the loader. + workers.Go(func() error { + s.loader.run(workerCtx) + return nil + }) + + // Wait for all workers to exit. + return workers.Wait() +} + +// handleOpenAIInference handles scheduling and responding to OpenAI inference +// requests, including: +// - POST /{backend}/v1/chat/completions +// - POST /{backend}/v1/completions +// - POST /{backend}/v1/embeddings +func (s *Scheduler) handleOpenAIInference(w http.ResponseWriter, r *http.Request) { + // Determine the requested backend and ensure that it's valid. + var backend inference.Backend + if b := r.PathValue("backend"); b == "" { + backend = s.defaultBackend + } else { + backend = s.backends[b] + } + if backend == nil { + http.Error(w, ErrBackendNotFound.Error(), http.StatusNotFound) + return + } + + // Read the entire request body. We put some basic size constraints in place + // to avoid DoS attacks. We do this early to avoid client write timeouts. + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maximumOpenAIInferenceRequestSize)) + if err != nil { + if _, ok := err.(*http.MaxBytesError); ok { + http.Error(w, "request too large", http.StatusBadRequest) + } else { + http.Error(w, "unknown error", http.StatusInternalServerError) + } + return + } + + // Wait for the corresponding backend installation to complete or fail. 
We + // don't allow any requests to be scheduled for a backend until it has + // completed installation. + if err := s.installer.wait(r.Context(), backend.Name()); err != nil { + if errors.Is(err, ErrBackendNotFound) { + http.Error(w, err.Error(), http.StatusNotFound) + } else if errors.Is(err, errInstallerNotStarted) { + http.Error(w, err.Error(), http.StatusServiceUnavailable) + } else if errors.Is(err, context.Canceled) { + // This could be due to the client aborting the request (in which + // case this response will be ignored) or the inference service + // shutting down (since that will also cancel the request context). + // Either way, provide a response, even if it's ignored. + http.Error(w, "service unavailable", http.StatusServiceUnavailable) + } else { + http.Error(w, fmt.Errorf("backend installation failed: %w", err).Error(), http.StatusServiceUnavailable) + } + return + } + + // Determine the backend operation mode. + backendMode, ok := backendModeForRequest(r.URL.Path) + if !ok { + http.Error(w, "unknown request path", http.StatusInternalServerError) + return + } + + // Decode the model specification portion of the request body. + var request OpenAIInferenceRequest + if err := json.Unmarshal(body, &request); err != nil { + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + if request.Model == "" { + http.Error(w, "model is required", http.StatusBadRequest) + return + } + + // Check if the shared model manager has the requested model available. + if !backend.UsesExternalModelManagement() { + model, err := s.modelManager.GetModel(request.Model) + if err != nil { + if errors.Is(err, distribution.ErrModelNotFound) { + http.Error(w, err.Error(), http.StatusNotFound) + } else { + http.Error(w, "model unavailable", http.StatusInternalServerError) + } + return + } + // Non-blocking call to track the model usage. + s.tracker.TrackModel(model) + } + + // Request a runner to execute the request and defer its release. + runner, err := s.loader.load(r.Context(), backend.Name(), request.Model, backendMode) + if err != nil { + http.Error(w, fmt.Errorf("unable to load runner: %w", err).Error(), http.StatusInternalServerError) + return + } + defer s.loader.release(runner) + + // Create a request with the body replaced for forwarding upstream. + upstreamRequest := r.Clone(r.Context()) + upstreamRequest.Body = io.NopCloser(bytes.NewReader(body)) + + // Perform the request. 
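At this point the handler simply proxies the request with its body restored. Seen from a client, the contract looks roughly like the following sketch; the base URL and model name are assumptions, and the explicit `/{backend}/` segment (e.g. `.../engines/llama.cpp/v1/chat/completions`) is equally valid:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical endpoint for a locally reachable model runner.
	url := "http://localhost:12434/engines/v1/chat/completions"
	body := []byte(`{
	  "model": "ai/smollm2",
	  "messages": [{"role": "user", "content": "Say hello."}]
	}`)

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```

Per the handler above, omitting the `model` field draws a 400 before any runner is consulted, and an unknown backend segment draws a 404.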
+ runner.ServeHTTP(w, upstreamRequest) +} + +func (s *Scheduler) GetBackendStatus(w http.ResponseWriter, r *http.Request) { + status := make(map[string]string) + for backendName, backend := range s.backends { + status[backendName] = backend.Status() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(status) +} + +func (s *Scheduler) ResetInstaller(httpClient *http.Client) { + s.installer = newInstaller(s.log, s.backends, httpClient) +} + +// GetRunningBackends returns information about all running backends +func (s *Scheduler) GetRunningBackends(w http.ResponseWriter, r *http.Request) { + runningBackends := s.getLoaderStatus(r.Context()) + + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(runningBackends); err != nil { + http.Error(w, fmt.Sprintf("Failed to encode response: %v", err), http.StatusInternalServerError) + return + } +} + +// getLoaderStatus returns information about all running backends managed by the loader +func (s *Scheduler) getLoaderStatus(ctx context.Context) []BackendStatus { + if !s.loader.lock(ctx) { + return []BackendStatus{} + } + defer s.loader.unlock() + + result := make([]BackendStatus, 0, len(s.loader.runners)) + + for key, slot := range s.loader.runners { + if s.loader.slots[slot] != nil { + status := BackendStatus{ + BackendName: key.backend, + ModelName: key.model, + Mode: key.mode.String(), + LastUsed: time.Time{}, + } + + if s.loader.references[slot] == 0 { + status.LastUsed = s.loader.timestamps[slot] + } + + result = append(result, status) + } + } + + return result +} + +func (s *Scheduler) GetDiskUsage(w http.ResponseWriter, _ *http.Request) { + modelsDiskUsage, err, httpCode := s.modelManager.GetDiskUsage() + if err != nil { + http.Error(w, fmt.Sprintf("Failed to get models disk usage: %v", err), httpCode) + return + } + + // TODO: Get disk usage for each backend once the backends are implemented. + defaultBackendDiskUsage, err := s.defaultBackend.GetDiskUsage() + if err != nil { + http.Error(w, fmt.Sprintf("Failed to get disk usage for %s: %v", s.defaultBackend.Name(), err), http.StatusInternalServerError) + return + } + + diskUsage := DiskUsage{modelsDiskUsage, defaultBackendDiskUsage} + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(diskUsage); err != nil { + http.Error(w, fmt.Sprintf("Failed to encode response: %v", err), http.StatusInternalServerError) + return + } +} + +// Unload unloads the specified runners (backend, model) from the backend. +// Currently, this doesn't work for runners that are handling an OpenAI request. 
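Its request body is the `UnloadRequest` shape from api.go; two representative payloads, sketched with a local mirror of the struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of scheduling.UnloadRequest, reproduced here for illustration.
type unloadRequest struct {
	All     bool     `json:"all"`
	Backend string   `json:"backend"`
	Models  []string `json:"models"`
}

func main() {
	// Unload everything.
	all, _ := json.Marshal(unloadRequest{All: true})
	fmt.Println(string(all))

	// Unload one model; per evictRunner above, an empty Backend matches
	// runners on any backend.
	one, _ := json.Marshal(unloadRequest{Backend: "llama.cpp", Models: []string{"ai/smollm2"}})
	fmt.Println(string(one))
}
```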
+func (s *Scheduler) Unload(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maximumOpenAIInferenceRequestSize)) + if err != nil { + if _, ok := err.(*http.MaxBytesError); ok { + http.Error(w, "request too large", http.StatusBadRequest) + } else { + http.Error(w, "unknown error", http.StatusInternalServerError) + } + return + } + + var unloadRequest UnloadRequest + if err := json.Unmarshal(body, &unloadRequest); err != nil { + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + + unloadedRunners := UnloadResponse{s.loader.Unload(r.Context(), unloadRequest)} + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(unloadedRunners); err != nil { + http.Error(w, fmt.Sprintf("Failed to encode response: %v", err), http.StatusInternalServerError) + return + } +} + +func (s *Scheduler) Configure(w http.ResponseWriter, r *http.Request) { + // Determine the requested backend and ensure that it's valid. + var backend inference.Backend + if b := r.PathValue("backend"); b == "" { + backend = s.defaultBackend + } else { + backend = s.backends[b] + } + if backend == nil { + http.Error(w, ErrBackendNotFound.Error(), http.StatusNotFound) + return + } + + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maximumOpenAIInferenceRequestSize)) + if err != nil { + if _, ok := err.(*http.MaxBytesError); ok { + http.Error(w, "request too large", http.StatusBadRequest) + } else { + http.Error(w, "unknown error", http.StatusInternalServerError) + } + return + } + + configureRequest := ConfigureRequest{ + Model: "", + ContextSize: -1, + RawRuntimeFlags: "", + } + if err := json.Unmarshal(body, &configureRequest); err != nil { + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + rawFlags, err := shellwords.Parse(configureRequest.RawRuntimeFlags) + if err != nil { + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + + var runnerConfig inference.BackendConfiguration + runnerConfig.ContextSize = configureRequest.ContextSize + runnerConfig.RawFlags = rawFlags + + if err := s.loader.setRunnerConfig(r.Context(), backend.Name(), configureRequest.Model, inference.BackendModeCompletion, runnerConfig); err != nil { + s.log.Warnf("Failed to configure %s runner for %s: %s", backend.Name(), configureRequest.Model, err) + if errors.Is(err, errRunnerAlreadyActive) { + http.Error(w, err.Error(), http.StatusConflict) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + w.WriteHeader(http.StatusAccepted) +} + +// ServeHTTP implements net/http.Handler.ServeHTTP. +func (s *Scheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.lock.Lock() + defer s.lock.Unlock() + s.router.ServeHTTP(w, r) +} diff --git a/vendor/github.com/docker/model-runner/pkg/internal/archive/archive.go b/vendor/github.com/docker/model-runner/pkg/internal/archive/archive.go new file mode 100644 index 00000000..e31bf678 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/internal/archive/archive.go @@ -0,0 +1,42 @@ +package archive + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckRelative returns an error if the filename path escapes dir. +// This is used to protect against path traversal attacks when extracting archives. +// It also rejects absolute filename paths. 
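Its behavior on friendly and hostile inputs, sketched below; the paths are illustrative, and since the package sits under `internal`, this only compiles from inside the module:

```go
package main

import (
	"fmt"

	"github.com/docker/model-runner/pkg/internal/archive"
)

func main() {
	// A well-behaved archive entry stays inside the extraction root.
	target, err := archive.CheckRelative("/tmp/extract", "blobs/sha256/abc")
	fmt.Println(target, err) // /tmp/extract/blobs/sha256/abc <nil>

	// A traversal attempt is rejected: the joined path escapes the root.
	_, err = archive.CheckRelative("/tmp/extract", "../../etc/passwd")
	fmt.Println(err) // archive file "/etc/passwd" escapes "/tmp/extract"
}
```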
+func CheckRelative(dir, filename string) (string, error) { + if filepath.IsAbs(filename) { + return "", fmt.Errorf("archive path has absolute path: %q", filename) + } + target := filepath.Join(dir, filename) + if resolved, err := filepath.EvalSymlinks(target); err == nil { + target = resolved + if resolved, err = filepath.EvalSymlinks(dir); err == nil { + dir = resolved + } + } + rel, err := filepath.Rel(dir, target) + if err != nil { + return "", err + } + if strings.HasPrefix(rel, "..") { + return "", fmt.Errorf("archive file %q escapes %q", target, dir) + } + return target, nil +} + +// CheckSymlink returns an error if the link path escapes dir. +// This is used to protect against path traversal attacks when extracting archives. +// It also rejects absolute linkname paths. +func CheckSymlink(dir, name, linkname string) error { + if filepath.IsAbs(linkname) { + return fmt.Errorf("archive path has absolute link: %q", linkname) + } + _, err := CheckRelative(dir, filepath.Join(filepath.Dir(name), linkname)) + return err +} diff --git a/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/download.go b/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/download.go new file mode 100644 index 00000000..a2008323 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/download.go @@ -0,0 +1,136 @@ +package dockerhub + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "log" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/images/archive" + "github.com/containerd/containerd/v2/core/remotes" + "github.com/containerd/containerd/v2/core/remotes/docker" + "github.com/containerd/containerd/v2/plugins/content/local" + "github.com/containerd/platforms" + "github.com/docker/model-runner/pkg/internal/jsonutil" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func PullPlatform(ctx context.Context, image, destination, requiredOs, requiredArch string) error { + if err := os.MkdirAll(filepath.Dir(destination), 0o755); err != nil { + return fmt.Errorf("creating destination directory %s: %w", filepath.Dir(destination), err) + } + output, err := os.Create(destination) + if err != nil { + return fmt.Errorf("creating destination file %s: %w", destination, err) + } + tmpDir, err := os.MkdirTemp("", "docker-pull") + if err != nil { + return fmt.Errorf("creating temp directory: %w", err) + } + defer os.RemoveAll(tmpDir) + store, err := local.NewStore(tmpDir) + if err != nil { + return fmt.Errorf("creating new content store: %w", err) + } + desc, err := retry(ctx, 10, 1*time.Second, func() (*v1.Descriptor, error) { return fetch(ctx, store, image, requiredOs, requiredArch) }) + if err != nil { + return fmt.Errorf("fetching image: %w", err) + } + return archive.Export(ctx, store, output, archive.WithManifest(*desc, image), archive.WithSkipMissing(store)) +} + +func retry(ctx context.Context, attempts int, sleep time.Duration, f func() (*v1.Descriptor, error)) (*v1.Descriptor, error) { + var err error + var result *v1.Descriptor + for i := 0; i < attempts; i++ { + if i > 0 { + log.Printf("retry %d after error: %v\n", i, err) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(sleep): + } + } + result, err = f() + if err == nil { + return result, nil + } + } + return nil, fmt.Errorf("after %d attempts, last error: %s", attempts, err) +} + +func fetch(ctx 
context.Context, store content.Store, ref, requiredOs, requiredArch string) (*v1.Descriptor, error) { + resolver := docker.NewResolver(docker.ResolverOptions{ + Hosts: docker.ConfigureDefaultRegistries( + docker.WithAuthorizer( + docker.NewDockerAuthorizer( + docker.WithAuthCreds(dockerCredentials)))), + }) + name, desc, err := resolver.Resolve(ctx, ref) + if err != nil { + return nil, err + } + fetcher, err := resolver.Fetcher(ctx, name) + if err != nil { + return nil, err + } + + childrenHandler := images.ChildrenHandler(store) + if requiredOs != "" && requiredArch != "" { + requiredPlatform := platforms.Only(v1.Platform{OS: requiredOs, Architecture: requiredArch}) + childrenHandler = images.LimitManifests(images.FilterPlatforms(images.ChildrenHandler(store), requiredPlatform), requiredPlatform, 1) + } + h := images.Handlers(remotes.FetchHandler(store, fetcher), childrenHandler) + if err := images.Dispatch(ctx, h, nil, desc); err != nil { + return nil, err + } + return &desc, nil +} + +func dockerCredentials(host string) (string, string, error) { + hubUsername, hubPassword := os.Getenv("DOCKER_HUB_USER"), os.Getenv("DOCKER_HUB_PASSWORD") + if hubUsername != "" && hubPassword != "" { + return hubUsername, hubPassword, nil + } + logrus.WithField("host", host).Debug("checking for registry auth config") + home, err := os.UserHomeDir() + if err != nil { + return "", "", err + } + credentialConfig := filepath.Join(home, ".docker", "config.json") + cfg := struct { + Auths map[string]struct { + Auth string + } + }{} + if err := jsonutil.ReadFile(credentialConfig, &cfg); err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", "", nil + } + return "", "", err + } + for h, r := range cfg.Auths { + if h == host { + creds, err := base64.StdEncoding.DecodeString(r.Auth) + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(creds), ":", 2) + if len(parts) != 2 { + logrus.Debugf("skipping not user/password auth for registry %s: %s", host, parts[0]) + return "", "", nil + } + logrus.Debugf("using auth for registry %s: user=%s", host, parts[0]) + return parts[0], parts[1], nil + } + } + return "", "", nil +} diff --git a/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/extract.go b/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/extract.go new file mode 100644 index 00000000..3ece97aa --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/internal/dockerhub/extract.go @@ -0,0 +1,224 @@ +package dockerhub + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/model-runner/pkg/internal/archive" + "github.com/opencontainers/go-digest" + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ManifestNotFoundError struct { + os string + platform string +} + +func (r *ManifestNotFoundError) Error() string { + return fmt.Sprintf("unable to find manifest for %s/%s", r.os, r.platform) +} + +func newManifestNotFoundError(os, platform string) *ManifestNotFoundError { + return &ManifestNotFoundError{ + os: os, + platform: platform, + } +} + +// Extract all files from a `docker save` tarFile matching a given architecture +// and OS to destination. Note this doesn't handle files which have been deleted +// in layers. 
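+//
+// A hypothetical call, assuming a `docker save` tarball on disk (the path,
+// platform, and destination values below are illustrative only):
+//
+//	err := Extract("/tmp/image.tar", "amd64", "linux", "/opt/rootfs")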
+func Extract(tarFile, architecture, OS, destination string) error {
+	tmpDir, err := os.MkdirTemp("", "docker-tar-extract")
+	if err != nil {
+		return fmt.Errorf("creating temp directory for untar: %w", err)
+	}
+	defer os.RemoveAll(tmpDir)
+	if err := unTarFile(tarFile, tmpDir); err != nil {
+		return fmt.Errorf("untarring %s: %w", tarFile, err)
+	}
+	return extract(tmpDir, architecture, OS, destination)
+}
+
+func unTarFile(tarFile, destinationFolder string) error {
+	from, err := os.Open(tarFile)
+	if err != nil {
+		return fmt.Errorf("opening tar file %s: %w", tarFile, err)
+	}
+	defer from.Close()
+	return unTar(from, destinationFolder)
+}
+
+func unTar(from io.Reader, destinationFolder string) error {
+	tarReader := tar.NewReader(from)
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		// Reject entries whose paths would escape destinationFolder (path traversal).
+		path, err := archive.CheckRelative(destinationFolder, header.Name)
+		if err != nil {
+			return err
+		}
+		info := header.FileInfo()
+		if info.IsDir() {
+			if err = os.MkdirAll(path, info.Mode()); err != nil {
+				return err
+			}
+			continue
+		}
+
+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			// Symlink targets are validated the same way before the link is created.
+			if err := archive.CheckSymlink(destinationFolder, header.Name, header.Linkname); err != nil {
+				return err
+			}
+			if err := os.Symlink(header.Linkname, path); err != nil {
+				return err
+			}
+			continue
+		}
+
+		if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode())
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(file, tarReader); err != nil {
+			_ = file.Close()
+			return err
+		}
+		if err := file.Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func extract(dir, architecture, OS, destination string) error {
+	indexJSON, err := os.ReadFile(filepath.Join(dir, "index.json"))
+	if err != nil {
+		return fmt.Errorf("reading %s/index.json: %w", dir, err)
+	}
+	var index v1.Index
+	if err := json.Unmarshal(indexJSON, &index); err != nil {
+		return fmt.Errorf("unmarshalling index: %w", err)
+	}
+	// Assume only one manifest for now
+	if len(index.Manifests) != 1 {
+		return errors.New("expected exactly one image manifest")
+	}
+	digest := index.Manifests[0].Digest
+	bs, err := readBlob(dir, digest)
+	if err != nil {
+		return fmt.Errorf("reading blob %s: %w", digest.String(), err)
+	}
+	mtype := index.Manifests[0].MediaType
+	// is it a manifest or a manifest list?
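+	// (Single Docker schema2 manifests, OCI image indexes, and Docker
+	// manifest lists are each handled below; any other media type is
+	// rejected as unknown.)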
+ if mtype == mediaTypeManifest { + // not a multi-arch image + return extractFromDigest(dir, digest, destination) + } + if mtype == mediaTypeOCI { + return extractFromOCI(dir, digest, destination, OS, architecture) + } + if mtype != mediaTypeManifestList { + return fmt.Errorf("unknown mediaType in manifest: %s", mtype) + } + // multi-arch image so look up the Architecture and OS + var manifestList v1.Index + if err := json.Unmarshal(bs, &manifestList); err != nil { + return fmt.Errorf("unmarshalling manifest list: %w", err) + } + for _, m := range manifestList.Manifests { + if m.Platform.Architecture != architecture { + continue + } + if m.Platform.OS != OS { + continue + } + return extractFromDigest(dir, m.Digest, destination) + } + + return newManifestNotFoundError(OS, architecture) +} + +const ( + mediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" + mediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + mediaTypeOCI = "application/vnd.oci.image.index.v1+json" + mediaTypeOCIManifest = "application/vnd.oci.image.manifest.v1+json" + mediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" + mediaTypeOCILayer = "application/vnd.oci.image.layer.v1.tar+gzip" +) + +func blobPath(dir string, digest digest.Digest) string { + return filepath.Join(dir, "blobs", digest.Algorithm().String(), digest.Hex()) +} + +func readBlob(dir string, digest digest.Digest) ([]byte, error) { + return os.ReadFile(blobPath(dir, digest)) +} + +func extractFromOCI(dir string, digest digest.Digest, destination, OS, architecture string) error { + bs, err := readBlob(dir, digest) + if err != nil { + return fmt.Errorf("extracting digest %s: %w", digest.String(), err) + } + var index v1.Index + if err := json.Unmarshal(bs, &index); err != nil { + return fmt.Errorf("unmarshalling index: %w", err) + } + for _, manifest := range index.Manifests { + if manifest.MediaType == mediaTypeOCIManifest && manifest.Platform.OS == OS && manifest.Platform.Architecture == architecture { + return extractFromDigest(dir, manifest.Digest, destination) + } + } + return newManifestNotFoundError(OS, architecture) +} + +func extractFromDigest(dir string, digest digest.Digest, destination string) error { + bs, err := readBlob(dir, digest) + if err != nil { + return fmt.Errorf("extracting digest %s: %w", digest.String(), err) + } + var manifest v1.Manifest + if err := json.Unmarshal(bs, &manifest); err != nil { + return fmt.Errorf("unmarshalling manifest: %w", err) + } + for _, layer := range manifest.Layers { + if err := extractLayer(dir, layer, destination); err != nil { + return fmt.Errorf("extracting layer %s: %w", layer.Digest.String(), err) + } + } + return nil +} + +func extractLayer(dir string, layer v1.Descriptor, destination string) error { + fmt.Printf("descriptor %s has media type %s\n", layer.Digest.String(), layer.MediaType) + if layer.MediaType != mediaTypeLayer && layer.MediaType != mediaTypeOCILayer { + return fmt.Errorf("expected layer %s to have media type %s or %s, received %s", layer.Digest.String(), mediaTypeLayer, mediaTypeOCILayer, layer.MediaType) + } + f, err := os.Open(blobPath(dir, layer.Digest)) + if err != nil { + return fmt.Errorf("reading blob %s: %w", layer.Digest.String(), err) + } + defer f.Close() + gz, err := gzip.NewReader(f) + if err != nil { + return fmt.Errorf("decompressing %s: %w", layer.Digest.String(), err) + } + defer gz.Close() + return unTar(gz, destination) +} diff --git 
a/vendor/github.com/docker/model-runner/pkg/internal/jsonutil/jsonutil.go b/vendor/github.com/docker/model-runner/pkg/internal/jsonutil/jsonutil.go new file mode 100644 index 00000000..c9a2d0fc --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/internal/jsonutil/jsonutil.go @@ -0,0 +1,21 @@ +package jsonutil + +import ( + "encoding/json" + "fmt" + "os" +) + +// ReadFile parses the contents of a file as JSON. +func ReadFile[T any](path string, result T) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + dec := json.NewDecoder(f) + if err := dec.Decode(&result); err != nil { + return fmt.Errorf("parsing JSON: %w", err) + } + return nil +} diff --git a/vendor/github.com/docker/model-runner/pkg/metrics/metrics.go b/vendor/github.com/docker/model-runner/pkg/metrics/metrics.go new file mode 100644 index 00000000..11521985 --- /dev/null +++ b/vendor/github.com/docker/model-runner/pkg/metrics/metrics.go @@ -0,0 +1,91 @@ +package metrics + +import ( + "context" + "net/http" + "os" + "time" + + "github.com/docker/model-distribution/types" + "github.com/docker/model-runner/pkg/logging" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/sirupsen/logrus" +) + +type Tracker struct { + doNotTrack bool + transport http.RoundTripper + log logging.Logger + userAgent string +} + +type TrackerRoundTripper struct { + Transport http.RoundTripper +} + +func (h *TrackerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + clonedReq := req.Clone(req.Context()) + ctx, cancel := context.WithTimeout(clonedReq.Context(), 5*time.Second) + defer cancel() + clonedReq = clonedReq.WithContext(ctx) + clonedReq.Header.Set("x-docker-model-runner", "true") + return h.Transport.RoundTrip(clonedReq) +} + +func NewTracker(httpClient *http.Client, log logging.Logger, userAgent string, doNotTrack bool) *Tracker { + client := *httpClient + if client.Transport == nil { + client.Transport = http.DefaultTransport + } + + if userAgent == "" { + userAgent = "docker-model-runner" + } + + if os.Getenv("DEBUG") == "1" { + if logger, ok := log.(*logrus.Logger); ok { + logger.SetLevel(logrus.DebugLevel) + } else if entry, ok := log.(*logrus.Entry); ok { + entry.Logger.SetLevel(logrus.DebugLevel) + } + } + + return &Tracker{ + doNotTrack: os.Getenv("DO_NOT_TRACK") == "1" || doNotTrack, + transport: &TrackerRoundTripper{Transport: client.Transport}, + log: log, + userAgent: userAgent, + } +} + +func (t *Tracker) TrackModel(model types.Model) { + if t.doNotTrack { + return + } + + go t.trackModel(model) +} + +func (t *Tracker) trackModel(model types.Model) { + tags := model.Tags() + t.log.Debugln("Tracking model:", tags) + if len(tags) == 0 { + return + } + ref, err := name.ParseReference(tags[0]) + if err != nil { + t.log.Errorf("Error parsing reference: %v\n", err) + return + } + if _, err = remote.Head(ref, + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithTransport(t.transport), + remote.WithUserAgent(t.userAgent), + ); err != nil { + t.log.Debugf("Manifest does not exist or error occurred: %v\n", err) + return + } + t.log.Debugln("Tracked", ref.Name(), ref.Identifier()) +} diff --git a/vendor/github.com/docker/model-runner/pkg/tailbuffer/tailbuffer.go b/vendor/github.com/docker/model-runner/pkg/tailbuffer/tailbuffer.go new file mode 100644 index 00000000..1f70ceb1 --- /dev/null +++ 
b/vendor/github.com/docker/model-runner/pkg/tailbuffer/tailbuffer.go @@ -0,0 +1,81 @@ +package tailbuffer + +import ( + "io" + "sync" +) + +type tailBuffer struct { + lock sync.Mutex + buf []byte + capacity uint + size uint + read uint + write uint +} + +func NewTailBuffer(size uint) io.ReadWriter { + return &tailBuffer{ + buf: make([]byte, size), + capacity: size, + size: 0, + read: 0, + write: 0, + } +} + +func (w *tailBuffer) Write(buffer []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + written := 0 + shouldPushRead := false + si := 0 + if len(buffer) > int(w.capacity) { + si = len(buffer) - int(w.capacity) + } + for _, b := range buffer[si:] { + if shouldPushRead { + if w.read+1 < w.capacity { + w.read += 1 + } else { + w.read = 0 + } + } + w.buf[w.write] = b + if w.write+1 < w.capacity { + w.write += 1 + } else { + w.write = 0 + } + w.size += 1 + if w.size > w.capacity { + w.size = w.capacity + } + shouldPushRead = w.write == w.read + written += 1 + } + return si + written, nil +} + +func (w *tailBuffer) Read(buffer []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + + var err error + read := uint(0) + for read < w.size && int(read) < len(buffer) { + buffer[read] = w.buf[w.read] + if w.read+1 < w.capacity { + w.read += 1 + } else { + w.read = 0 + } + read += 1 + } + w.size -= read + if read == 0 { + err = io.EOF + } + return int(read), err +} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml new file mode 100644 index 00000000..28f740cd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/.travis.yml @@ -0,0 +1,8 @@ +language: go +sudo: false + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md new file mode 100644 index 00000000..4ba6a8c6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md @@ -0,0 +1,49 @@ +# Version 1.x.x + +* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) + +# Version 1.2.0-alphaX + +**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** + + * Added CI configuration for Travis-CI and AppVeyor. + * Added test InterfaceID and ClassID for the COM Test Server project. + * Added more inline documentation (#83). + * Added IEnumVARIANT implementation (#88). + * Added IEnumVARIANT test cases (#99, #100, #101). + * Added support for retrieving `time.Time` from VARIANT (#92). + * Added test case for IUnknown (#64). + * Added test case for IDispatch (#64). + * Added test cases for scalar variants (#64, #76). + +# Version 1.1.1 + + * Fixes for Linux build. + * Fixes for Windows build. + +# Version 1.1.0 + +The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. + + * Move GUID out of variables.go into its own file to make new documentation available. + * Move OleError out of ole.go into its own file to make new documentation available. + * Add documentation to utility functions. + * Add documentation to variant receiver functions. + * Add documentation to ole structures. + * Make variant available to other systems outside of Windows. + * Make OLE structures available to other systems outside of Windows. + +## New Features + + * Library should now be built on all platforms supported by Go. 
Library will NOOP on any platform that is not Windows. + * More functions are now documented and available on godoc.org. + +# Version 1.0.1 + + 1. Fix package references from repository location change. + +# Version 1.0.0 + +This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. + +There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE new file mode 100644 index 00000000..623ec06f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md new file mode 100644 index 00000000..7b577558 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/README.md @@ -0,0 +1,46 @@ +# Go OLE + +[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) +[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) +[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) + +Go bindings for Windows COM using shared libraries instead of cgo. + +By Yasuhiro Matsumoto. + +## Install + +To experiment with go-ole, you can just compile and run the example program: + +``` +go get github.com/go-ole/go-ole +cd /path/to/go-ole/ +go test + +cd /path/to/go-ole/example/excel +go run excel.go +``` + +## Continuous Integration + +Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. + +**Travis-CI** + +Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. + +**AppVeyor** + +AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. 
It will eventually be used to register a COM server and then run the test cases against that server.
+
+The tests currently run and pass, and this should be maintained with each commit.
+
+## Versioning
+
+Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. This means that the major version will always maintain backwards compatibility with minor versions; minor versions will only add new features and changes, and fixes will always land in patch releases.
+
+This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. If there is breakage, please file a ticket so that it can be fixed.
+
+## LICENSE
+
+Under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml
new file mode 100644
index 00000000..0d557ac2
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/appveyor.yml
@@ -0,0 +1,54 @@
+# Notes:
+#   - Minimal appveyor.yml file is an empty file. All sections are optional.
+#   - Indent each level of configuration with 2 spaces. Do not use tabs!
+#   - All section names are case-sensitive.
+#   - Section names should be unique on each level.
+
+version: "1.3.0.{build}-alpha-{branch}"
+
+os: Windows Server 2012 R2
+
+branches:
+  only:
+    - master
+    - v1.2
+    - v1.1
+    - v1.0
+
+skip_tags: true
+
+clone_folder: c:\gopath\src\github.com\go-ole\go-ole
+
+environment:
+  GOPATH: c:\gopath
+  matrix:
+  - GOARCH: amd64
+    GOVERSION: 1.5
+    GOROOT: c:\go
+    DOWNLOADPLATFORM: "x64"
+
+install:
+  - choco install mingw
+  - SET PATH=c:\tools\mingw64\bin;%PATH%
+  # - Download COM Server
+  - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip"
+  - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL
+  - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat
+  # - set
+  - go version
+  - go env
+  - go get -u golang.org/x/tools/cmd/cover
+  - go get -u golang.org/x/tools/cmd/godoc
+  - go get -u golang.org/x/tools/cmd/stringer
+
+build_script:
+  - cd c:\gopath\src\github.com\go-ole\go-ole
+  - go get -v -t ./...
+  - go build
+  - go test -v -cover ./...
+ +# disable automatic tests +test: off + +# disable deployment +deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go new file mode 100644 index 00000000..a9bef150 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -0,0 +1,344 @@ +// +build windows + +package ole + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + procCoInitialize = modole32.NewProc("CoInitialize") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCoCreateInstance = modole32.NewProc("CoCreateInstance") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromCLSID = modole32.NewProc("StringFromCLSID") + procStringFromIID = modole32.NewProc("StringFromIID") + procIIDFromString = modole32.NewProc("IIDFromString") + procCoGetObject = modole32.NewProc("CoGetObject") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procCopyMemory = modkernel32.NewProc("RtlMoveMemory") + procVariantInit = modoleaut32.NewProc("VariantInit") + procVariantClear = modoleaut32.NewProc("VariantClear") + procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") + procGetActiveObject = modoleaut32.NewProc("GetActiveObject") + + procGetMessageW = moduser32.NewProc("GetMessageW") + procDispatchMessageW = moduser32.NewProc("DispatchMessageW") +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx + // Suggests that no value should be passed to CoInitialized. + // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. + hr, _, _ := procCoInitialize.Call(uintptr(0)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx + // Suggests that the first parameter is not only optional but should always be NULL. + hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. 
If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) (err error) { + // p is ignored and won't be used. + // Avoid any variable not used errors. + p = uintptr(0) + return coInitialize() +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) (err error) { + // Avoid any variable not used errors. + p = uintptr(0) + return coInitializeEx(coinit) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() { + procCoUninitialize.Call() +} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) { + procCoTaskMemFree.Call(memptr) +} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. These are usually found in the registry under +// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of +// "Program.Component.Version" with version being optional. +// +// CLSIDFromProgID in Windows API. +func CLSIDFromProgID(progId string) (clsid *GUID, err error) { + var guid GUID + lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// CLSIDFromString retrieves Class ID from string representation. +// +// This is technically the string version of the GUID and will convert the +// string to object. +// +// CLSIDFromString in Windows API. +func CLSIDFromString(str string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) + hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromCLSID returns GUID formated string from GUID object. +func StringFromCLSID(clsid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// IIDFromString returns GUID from program ID. +func IIDFromString(progId string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromIID returns GUID formatted string from GUID object. +func StringFromIID(iid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// CreateInstance of single uninitialized object with GUID. 
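+//
+// A hypothetical usage sketch from a consuming package (illustrative only,
+// not part of this file; "Excel.Application" is an example ProgID that would
+// need to be registered on the machine):
+//
+//	clsid, _ := ole.CLSIDFromProgID("Excel.Application")
+//	unk, err := ole.CreateInstance(clsid, ole.IID_IDispatch)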
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoCreateInstance.Call( + uintptr(unsafe.Pointer(clsid)), + 0, + CLSCTX_SERVER, + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// GetActiveObject retrieves pointer to active object. +func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procGetActiveObject.Call( + uintptr(unsafe.Pointer(clsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +type BindOpts struct { + CbStruct uint32 + GrfFlags uint32 + GrfMode uint32 + TickCountDeadline uint32 +} + +// GetObject retrieves pointer to active object. +func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { + if bindOpts != nil { + bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) + } + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoGetObject.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), + uintptr(unsafe.Pointer(bindOpts)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantInit initializes variant. +func VariantInit(v *VARIANT) (err error) { + hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantClear clears value in Variant settings to VT_EMPTY. +func VariantClear(v *VARIANT) (err error) { + hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysAllocString allocates memory for string and copies string into memory. +func SysAllocString(v string) (ss *int16) { + pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysAllocStringLen copies up to length of given string returning pointer. +func SysAllocStringLen(v string) (ss *int16) { + utf16 := utf16.Encode([]rune(v + "\x00")) + ptr := &utf16[0] + + pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysFreeString frees string system memory. This must be called with SysAllocString. +func SysFreeString(v *int16) (err error) { + hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysStringLen is the length of the system allocated string. +func SysStringLen(v *int16) uint32 { + l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) + return uint32(l) +} + +// CreateStdDispatch provides default IDispatch implementation for IUnknown. +// +// This handles default IDispatch implementation for objects. It haves a few +// limitations with only supporting one language. It will also only return +// default exception codes. +func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { + hr, _, _ := procCreateStdDispatch.Call( + uintptr(unsafe.Pointer(unk)), + v, + uintptr(unsafe.Pointer(ptinfo)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. +// +// This will not handle the full implementation of the interface. 
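+// (INTERFACEDATA, defined elsewhere in this package, describes the member
+// functions of the dispatch interface being constructed.)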
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { + hr, _, _ := procCreateDispTypeInfo.Call( + uintptr(unsafe.Pointer(idata)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&pptinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// copyMemory moves location of a block of memory. +func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { + procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) +} + +// GetUserDefaultLCID retrieves current user default locale. +func GetUserDefaultLCID() (lcid uint32) { + ret, _, _ := procGetUserDefaultLCID.Call() + lcid = uint32(ret) + return +} + +// GetMessage in message queue from runtime. +// +// This function appears to block. PeekMessage does not block. +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { + r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) + ret = int32(r0) + return +} + +// DispatchMessage to window procedure. +func DispatchMessage(msg *Msg) (ret int32) { + r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) + ret = int32(r0) + return +} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go new file mode 100644 index 00000000..cef539d9 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com_func.go @@ -0,0 +1,174 @@ +// +build !windows + +package ole + +import ( + "time" + "unsafe" +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() error { + return NewError(E_NOTIMPL) +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) error { + return NewError(E_NOTIMPL) +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() {} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) {} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. 
These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to an object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromCLSID returns GUID formatted string from GUID object.
+func StringFromCLSID(clsid *GUID) (string, error) {
+	return "", NewError(E_NOTIMPL)
+}
+
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (*GUID, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (string, error) {
+	return "", NewError(E_NOTIMPL)
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) error {
+	return NewError(E_NOTIMPL)
+}
+
+// VariantClear clears the value in the VARIANT, setting it to VT_EMPTY.
+func VariantClear(v *VARIANT) error {
+	return NewError(E_NOTIMPL)
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) *int16 {
+	u := int16(0)
+	return &u
+}
+
+// SysAllocStringLen copies up to the length of the given string, returning a pointer.
+func SysAllocStringLen(v string) *int16 {
+	u := int16(0)
+	return &u
+}
+
+// SysFreeString frees string system memory. This must be called for strings allocated with SysAllocString.
+func SysFreeString(v *int16) error {
+	return NewError(E_NOTIMPL)
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+	return uint32(0)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles default IDispatch implementation for objects. It has a few
+// limitations, supporting only one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
+	return nil, NewError(E_NOTIMPL)
+}
+
+// copyMemory moves location of a block of memory.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() uint32 {
+	return uint32(0)
+}
+
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
+	return int32(0), NewError(E_NOTIMPL)
+}
+
+// DispatchMessage to window procedure.
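+//
+// On non-Windows builds this is a stub that performs no dispatch and always
+// returns 0.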
+func DispatchMessage(msg *Msg) int32 { + return int32(0) +} + +func GetVariantDate(value uint64) (time.Time, error) { + return time.Now(), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go new file mode 100644 index 00000000..b2ac2ec6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/connect.go @@ -0,0 +1,192 @@ +package ole + +// Connection contains IUnknown for fluent interface interaction. +// +// Deprecated. Use oleutil package instead. +type Connection struct { + Object *IUnknown // Access COM +} + +// Initialize COM. +func (*Connection) Initialize() (err error) { + return coInitialize() +} + +// Uninitialize COM. +func (*Connection) Uninitialize() { + CoUninitialize() +} + +// Create IUnknown object based first on ProgId and then from String. +func (c *Connection) Create(progId string) (err error) { + var clsid *GUID + clsid, err = CLSIDFromProgID(progId) + if err != nil { + clsid, err = CLSIDFromString(progId) + if err != nil { + return + } + } + + unknown, err := CreateInstance(clsid, IID_IUnknown) + if err != nil { + return + } + c.Object = unknown + + return +} + +// Release IUnknown object. +func (c *Connection) Release() { + c.Object.Release() +} + +// Load COM object from list of programIDs or strings. +func (c *Connection) Load(names ...string) (errors []error) { + var tempErrors []error = make([]error, len(names)) + var numErrors int = 0 + for _, name := range names { + err := c.Create(name) + if err != nil { + tempErrors = append(tempErrors, err) + numErrors += 1 + continue + } + break + } + + copy(errors, tempErrors[0:numErrors]) + return +} + +// Dispatch returns Dispatch object. +func (c *Connection) Dispatch() (object *Dispatch, err error) { + dispatch, err := c.Object.QueryInterface(IID_IDispatch) + if err != nil { + return + } + object = &Dispatch{dispatch} + return +} + +// Dispatch stores IDispatch object. +type Dispatch struct { + Object *IDispatch // Dispatch object. +} + +// Call method on IDispatch with parameters. +func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(method) + if err != nil { + return + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + return +} + +// MustCall method on IDispatch with parameters. +func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(method) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + if err != nil { + panic(err) + } + + return +} + +// Get property on IDispatch with parameters. +func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + return +} + +// MustGet property on IDispatch with parameters. +func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + if err != nil { + panic(err) + } + return +} + +// Set property on IDispatch with parameters. +func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + return +} + +// MustSet property on IDispatch with parameters. 
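+//
+// Like MustCall and MustGet above, the Must* variants panic on error instead
+// of returning it.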
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + if err != nil { + panic(err) + } + return +} + +// GetId retrieves ID of name on IDispatch. +func (d *Dispatch) GetId(name string) (id int32, err error) { + var dispid []int32 + dispid, err = d.Object.GetIDsOfName([]string{name}) + if err != nil { + return + } + id = dispid[0] + return +} + +// GetIds retrieves all IDs of names on IDispatch. +func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { + dispid, err = d.Object.GetIDsOfName(names) + return +} + +// Invoke IDispatch on DisplayID of dispatch type with parameters. +// +// There have been problems where if send cascading params..., it would error +// out because the parameters would be empty. +func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { + if len(params) < 1 { + result, err = d.Object.Invoke(id, dispatch) + } else { + result, err = d.Object.Invoke(id, dispatch, params...) + } + return +} + +// Release IDispatch object. +func (d *Dispatch) Release() { + d.Object.Release() +} + +// Connect initializes COM and attempts to load IUnknown based on given names. +func Connect(names ...string) (connection *Connection) { + connection.Initialize() + connection.Load(names...) + return +} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go new file mode 100644 index 00000000..fd0c6d74 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/constants.go @@ -0,0 +1,153 @@ +package ole + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + S_OK = 0x00000000 + E_UNEXPECTED = 0x8000FFFF + E_NOTIMPL = 0x80004001 + E_OUTOFMEMORY = 0x8007000E + E_INVALIDARG = 0x80070057 + E_NOINTERFACE = 0x80004002 + E_POINTER = 0x80004003 + E_HANDLE = 0x80070006 + E_ABORT = 0x80004004 + E_FAIL = 0x80004005 + E_ACCESSDENIED = 0x80070005 + E_PENDING = 0x8000000A + + CO_E_CLASSSTRING = 0x800401F3 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +type VT uint16 + +const ( + VT_EMPTY VT = 0x0 + VT_NULL VT = 0x1 + VT_I2 VT = 0x2 + VT_I4 VT = 0x3 + VT_R4 VT = 0x4 + VT_R8 VT = 0x5 + VT_CY VT = 0x6 + VT_DATE VT = 0x7 + VT_BSTR VT = 0x8 + VT_DISPATCH VT = 0x9 + VT_ERROR VT = 0xa + VT_BOOL VT = 0xb + VT_VARIANT VT = 0xc + VT_UNKNOWN VT = 0xd + VT_DECIMAL VT = 0xe + VT_I1 VT = 0x10 + VT_UI1 VT = 0x11 + VT_UI2 VT = 0x12 + VT_UI4 VT = 0x13 + VT_I8 VT = 0x14 + VT_UI8 VT = 0x15 + VT_INT VT = 0x16 + VT_UINT VT = 0x17 + VT_VOID VT = 0x18 + VT_HRESULT VT = 0x19 + VT_PTR VT = 0x1a + VT_SAFEARRAY VT = 0x1b + VT_CARRAY VT = 0x1c + VT_USERDEFINED VT = 0x1d + VT_LPSTR VT = 0x1e + VT_LPWSTR VT = 0x1f + VT_RECORD VT = 0x24 + 
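	// (The VT_* values mirror the VARENUM constants from the Windows SDK.)
+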
VT_INT_PTR VT = 0x25 + VT_UINT_PTR VT = 0x26 + VT_FILETIME VT = 0x40 + VT_BLOB VT = 0x41 + VT_STREAM VT = 0x42 + VT_STORAGE VT = 0x43 + VT_STREAMED_OBJECT VT = 0x44 + VT_STORED_OBJECT VT = 0x45 + VT_BLOB_OBJECT VT = 0x46 + VT_CF VT = 0x47 + VT_CLSID VT = 0x48 + VT_BSTR_BLOB VT = 0xfff + VT_VECTOR VT = 0x1000 + VT_ARRAY VT = 0x2000 + VT_BYREF VT = 0x4000 + VT_RESERVED VT = 0x8000 + VT_ILLEGAL VT = 0xffff + VT_ILLEGALMASKED VT = 0xfff + VT_TYPEMASK VT = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + TKIND_ENUM = 1 + TKIND_RECORD = 2 + TKIND_MODULE = 3 + TKIND_INTERFACE = 4 + TKIND_DISPATCH = 5 + TKIND_COCLASS = 6 + TKIND_ALIAS = 7 + TKIND_UNION = 8 + TKIND_MAX = 9 +) + +// Safe Array Feature Flags + +const ( + FADF_AUTO = 0x0001 + FADF_STATIC = 0x0002 + FADF_EMBEDDED = 0x0004 + FADF_FIXEDSIZE = 0x0010 + FADF_RECORD = 0x0020 + FADF_HAVEIID = 0x0040 + FADF_HAVEVARTYPE = 0x0080 + FADF_BSTR = 0x0100 + FADF_UNKNOWN = 0x0200 + FADF_DISPATCH = 0x0400 + FADF_VARIANT = 0x0800 + FADF_RESERVED = 0xF008 +) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go new file mode 100644 index 00000000..096b456d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error.go @@ -0,0 +1,51 @@ +package ole + +// OleError stores COM errors. +type OleError struct { + hr uintptr + description string + subError error +} + +// NewError creates new error with HResult. +func NewError(hr uintptr) *OleError { + return &OleError{hr: hr} +} + +// NewErrorWithDescription creates new COM error with HResult and description. +func NewErrorWithDescription(hr uintptr, description string) *OleError { + return &OleError{hr: hr, description: description} +} + +// NewErrorWithSubError creates new COM error with parent error. +func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { + return &OleError{hr: hr, description: description, subError: err} +} + +// Code is the HResult. +func (v *OleError) Code() uintptr { + return uintptr(v.hr) +} + +// String description, either manually set or format message with error code. +func (v *OleError) String() string { + if v.description != "" { + return errstr(int(v.hr)) + " (" + v.description + ")" + } + return errstr(int(v.hr)) +} + +// Error implements error interface. +func (v *OleError) Error() string { + return v.String() +} + +// Description retrieves error summary, if there is one. +func (v *OleError) Description() string { + return v.description +} + +// SubError returns parent error, if there is one. +func (v *OleError) SubError() error { + return v.subError +} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go new file mode 100644 index 00000000..8a2ffaa2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_func.go @@ -0,0 +1,8 @@ +// +build !windows + +package ole + +// errstr converts error code to string. +func errstr(errno int) string { + return "" +} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go new file mode 100644 index 00000000..d0e8e685 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package ole + +import ( + "fmt" + "syscall" + "unicode/utf16" +) + +// errstr converts error code to string. 
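+//
+// It asks Windows for the message text via syscall.FormatMessage and trims
+// the trailing newline; if the lookup fails, it falls back to a generic
+// "error %d" string.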
+func errstr(errno int) string { + // ask windows for the remaining errors + var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + b := make([]uint16, 300) + n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) + if err != nil { + return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go new file mode 100644 index 00000000..8d20f68f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/guid.go @@ -0,0 +1,284 @@ +package ole + +var ( + // IID_NULL is null Interface ID, used when no other Interface ID is known. + IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") + + // IID_IUnknown is for IUnknown interfaces. + IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") + + // IID_IDispatch is for IDispatch interfaces. + IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") + + // IID_IEnumVariant is for IEnumVariant interfaces + IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") + + // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. + IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") + + // IID_IConnectionPoint is for IConnectionPoint interfaces. + IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") + + // IID_IInspectable is for IInspectable interfaces. + IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") + + // IID_IProvideClassInfo is for IProvideClassInfo interfaces. + IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") +) + +// These are for testing and not part of any library. +var ( + // IID_ICOMTestString is for ICOMTestString interfaces. + // + // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} + IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") + + // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. + // + // {BEB06610-EB84-4155-AF58-E2BFF53680B4} + IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") + + // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. + // + // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} + IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") + + // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. + // + // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} + IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") + + // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. + // + // {8D437CBC-B3ED-485C-BC32-C336432A1623} + IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") + + // IID_ICOMTestFloat is for ICOMTestFloat interfaces. + // + // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} + IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") + + // IID_ICOMTestDouble is for ICOMTestDouble interfaces. + // + // {BF908A81-8687-4E93-999F-D86FAB284BA0} + IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") + + // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. + // + // {D530E7A6-4EE8-40D1-8931-3D63B8605010} + IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") + + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. 
+ // + // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} + IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") + + // IID_ICOMTestTypes is for ICOMTestTypes interfaces. + // + // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} + IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") + + // CLSID_COMEchoTestObject is for COMEchoTestObject class. + // + // {3C24506A-AE9E-4D50-9157-EF317281F1B0} + CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") + + // CLSID_COMTestScalarClass is for COMTestScalarClass class. + // + // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} + CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") +) + +const hextable = "0123456789ABCDEF" +const emptyGUID = "{00000000-0000-0000-0000-000000000000}" + +// GUID is Windows API specific GUID type. +// +// This exists to match Windows GUID type for direct passing for COM. +// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// NewGUID converts the given string into a globally unique identifier that is +// compliant with the Windows API. +// +// The supplied string may be in any of these formats: +// +// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// The conversion of the supplied string is not case-sensitive. +func NewGUID(guid string) *GUID { + d := []byte(guid) + var d1, d2, d3, d4a, d4b []byte + + switch len(d) { + case 38: + if d[0] != '{' || d[37] != '}' { + return nil + } + d = d[1:37] + fallthrough + case 36: + if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { + return nil + } + d1 = d[0:8] + d2 = d[9:13] + d3 = d[14:18] + d4a = d[19:23] + d4b = d[24:36] + case 32: + d1 = d[0:8] + d2 = d[8:12] + d3 = d[12:16] + d4a = d[16:20] + d4b = d[20:32] + default: + return nil + } + + var g GUID + var ok1, ok2, ok3, ok4 bool + g.Data1, ok1 = decodeHexUint32(d1) + g.Data2, ok2 = decodeHexUint16(d2) + g.Data3, ok3 = decodeHexUint16(d3) + g.Data4, ok4 = decodeHexByte64(d4a, d4b) + if ok1 && ok2 && ok3 && ok4 { + return &g + } + return nil +} + +func decodeHexUint32(src []byte) (value uint32, ok bool) { + var b1, b2, b3, b4 byte + var ok1, ok2, ok3, ok4 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + b3, ok3 = decodeHexByte(src[4], src[5]) + b4, ok4 = decodeHexByte(src[6], src[7]) + value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) + ok = ok1 && ok2 && ok3 && ok4 + return +} + +func decodeHexUint16(src []byte) (value uint16, ok bool) { + var b1, b2 byte + var ok1, ok2 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + value = (uint16(b1) << 8) | uint16(b2) + ok = ok1 && ok2 + return +} + +func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { + var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool + value[0], ok1 = decodeHexByte(s1[0], s1[1]) + value[1], ok2 = decodeHexByte(s1[2], s1[3]) + value[2], ok3 = decodeHexByte(s2[0], s2[1]) + value[3], ok4 = decodeHexByte(s2[2], s2[3]) + value[4], ok5 = decodeHexByte(s2[4], s2[5]) + value[5], ok6 = decodeHexByte(s2[6], s2[7]) + value[6], ok7 = decodeHexByte(s2[8], s2[9]) + value[7], ok8 = decodeHexByte(s2[10], s2[11]) + ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 + return +} + +func decodeHexByte(c1, c2 byte) (value byte, ok bool) { + var n1, n2 byte + var ok1, ok2 bool + n1, ok1 = decodeHexChar(c1) + n2, ok2 = 
decodeHexChar(c2) + value = (n1 << 4) | n2 + ok = ok1 && ok2 + return +} + +func decodeHexChar(c byte) (byte, bool) { + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + + return 0, false +} + +// String converts the GUID to string form. It will adhere to this pattern: +// +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// If the GUID is nil, the string representation of an empty GUID is returned: +// +// {00000000-0000-0000-0000-000000000000} +func (guid *GUID) String() string { + if guid == nil { + return emptyGUID + } + + var c [38]byte + c[0] = '{' + putUint32Hex(c[1:9], guid.Data1) + c[9] = '-' + putUint16Hex(c[10:14], guid.Data2) + c[14] = '-' + putUint16Hex(c[15:19], guid.Data3) + c[19] = '-' + putByteHex(c[20:24], guid.Data4[0:2]) + c[24] = '-' + putByteHex(c[25:37], guid.Data4[2:8]) + c[37] = '}' + return string(c[:]) +} + +func putUint32Hex(b []byte, v uint32) { + b[0] = hextable[byte(v>>24)>>4] + b[1] = hextable[byte(v>>24)&0x0f] + b[2] = hextable[byte(v>>16)>>4] + b[3] = hextable[byte(v>>16)&0x0f] + b[4] = hextable[byte(v>>8)>>4] + b[5] = hextable[byte(v>>8)&0x0f] + b[6] = hextable[byte(v)>>4] + b[7] = hextable[byte(v)&0x0f] +} + +func putUint16Hex(b []byte, v uint16) { + b[0] = hextable[byte(v>>8)>>4] + b[1] = hextable[byte(v>>8)&0x0f] + b[2] = hextable[byte(v)>>4] + b[3] = hextable[byte(v)&0x0f] +} + +func putByteHex(dst, src []byte) { + for i := 0; i < len(src); i++ { + dst[i*2] = hextable[src[i]>>4] + dst[i*2+1] = hextable[src[i]&0x0f] + } +} + +// IsEqualGUID compares two GUID. +// +// Not constant time comparison. +func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { + return guid1.Data1 == guid2.Data1 && + guid1.Data2 == guid2.Data2 && + guid1.Data3 == guid2.Data3 && + guid1.Data4[0] == guid2.Data4[0] && + guid1.Data4[1] == guid2.Data4[1] && + guid1.Data4[2] == guid2.Data4[2] && + guid1.Data4[3] == guid2.Data4[3] && + guid1.Data4[4] == guid2.Data4[4] && + guid1.Data4[5] == guid2.Data4[5] && + guid1.Data4[6] == guid2.Data4[6] && + guid1.Data4[7] == guid2.Data4[7] +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go new file mode 100644 index 00000000..9e6c49f4 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go @@ -0,0 +1,20 @@ +package ole + +import "unsafe" + +type IConnectionPoint struct { + IUnknown +} + +type IConnectionPointVtbl struct { + IUnknownVtbl + GetConnectionInterface uintptr + GetConnectionPointContainer uintptr + Advise uintptr + Unadvise uintptr + EnumConnections uintptr +} + +func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { + return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go new file mode 100644 index 00000000..5414dc3c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go @@ -0,0 +1,21 @@ +// +build !windows + +package ole + +import "unsafe" + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + return int32(0) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { + return NewError(E_NOTIMPL) +} diff --git 
a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go new file mode 100644 index 00000000..32bc1832 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + // XXX: This doesn't look like it does what it's supposed to + return release((*IUnknown)(unsafe.Pointer(v))) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Advise, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(&cookie))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Unadvise, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(cookie), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go new file mode 100644 index 00000000..165860d1 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go @@ -0,0 +1,17 @@ +package ole + +import "unsafe" + +type IConnectionPointContainer struct { + IUnknown +} + +type IConnectionPointContainerVtbl struct { + IUnknownVtbl + EnumConnectionPoints uintptr + FindConnectionPoint uintptr +} + +func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { + return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go new file mode 100644 index 00000000..5dfa42aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go @@ -0,0 +1,11 @@ +// +build !windows + +package ole + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go new file mode 100644 index 00000000..ad30d79e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().FindConnectionPoint, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(point))) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go new file mode 100644 index 00000000..d4af1240 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch.go @@ -0,0 +1,94 @@ +package ole + +import "unsafe" + +type IDispatch struct { + IUnknown +} + +type IDispatchVtbl 
struct { + IUnknownVtbl + GetTypeInfoCount uintptr + GetTypeInfo uintptr + GetIDsOfNames uintptr + Invoke uintptr +} + +func (v *IDispatch) VTable() *IDispatchVtbl { + return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { + dispid, err = getIDsOfName(v, names) + return +} + +func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + result, err = invoke(v, dispid, dispatch, params...) + return +} + +func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { + c, err = getTypeInfoCount(v) + return +} + +func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { + tinfo, err = getTypeInfo(v) + return +} + +// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. +// +// This replaces the common pattern of attempting to get a single name from the list of available +// IDs. It gives the first ID, if it is available. +func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { + var displayIDs []int32 + displayIDs, err = v.GetIDsOfName([]string{name}) + if err != nil { + return + } + displayID = displayIDs[0] + return +} + +// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. +// +// Accepts name and will attempt to retrieve Display ID to pass to Invoke. +// +// Passing params as an array is a workaround that could be fixed in later versions of Go that +// prevent passing empty params. During testing it was discovered that this is an acceptable way of +// getting around not being able to pass params normally. +func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { + displayID, err := v.GetSingleIDOfName(name) + if err != nil { + return + } + + if len(params) < 1 { + result, err = v.Invoke(displayID, dispatch) + } else { + result, err = v.Invoke(displayID, dispatch, params...) + } + + return +} + +// CallMethod invokes named function with arguments on object. +func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) +} + +// GetProperty retrieves the property with the name with the ability to pass arguments. +// +// Most of the time you will not need to pass arguments as most objects do not allow for this +// feature. Or at least, should not allow for this feature. Some servers don't follow best practices +// and this is provided for those edge cases. +func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) +} + +// PutProperty attempts to mutate a property in the object. 
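Taken together, GetSingleIDOfName, InvokeWithOptionalArgs, and the CallMethod/GetProperty/PutProperty wrappers are the high-level surface of this file. A minimal usage sketch, assuming a Windows host where the stock WScript.Shell automation object is registered (the object, member names, and arguments are illustrative only; error handling elided):

    // COM must be initialized on the calling thread before any of this.
    ole.CoInitialize(0)
    defer ole.CoUninitialize()

    unknown, _ := oleutil.CreateObject("WScript.Shell")
    defer unknown.Release()

    shell, _ := unknown.QueryInterface(ole.IID_IDispatch)
    defer shell.Release()

    cwd, _ := shell.GetProperty("CurrentDirectory") // DISPATCH_PROPERTYGET
    fmt.Println(cwd.ToString())
    cwd.Clear()

    shell.PutProperty("CurrentDirectory", `C:\`) // DISPATCH_PROPERTYPUT
    shell.CallMethod("Run", "notepad.exe")       // DISPATCH_METHOD

The vendored implementation of PutProperty follows.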
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go new file mode 100644 index 00000000..b8fbbe31 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { + return []int32{}, NewError(E_NOTIMPL) +} + +func getTypeInfoCount(disp *IDispatch) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { + return nil, NewError(E_NOTIMPL) +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go new file mode 100644 index 00000000..b399f047 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -0,0 +1,202 @@ +// +build windows + +package ole + +import ( + "math/big" + "syscall" + "time" + "unsafe" +) + +func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { + wnames := make([]*uint16, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + dispid = make([]int32, len(names)) + namelen := uint32(len(names)) + hr, _, _ := syscall.Syscall6( + disp.VTable().GetIDsOfNames, + 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(namelen), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfoCount, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&c)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfo, + 3, + uintptr(unsafe.Pointer(disp)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&tinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch vv := v.(type) { + case bool: + if vv { + vargs[n] = NewVariant(VT_BOOL, 0xffff) + } else { + vargs[n] = NewVariant(VT_BOOL, 0) + } + case *bool: + vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) + case uint8: + vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) + case *uint8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int8: + vargs[n] = 
NewVariant(VT_I1, int64(v.(int8))) + case *int8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int16: + vargs[n] = NewVariant(VT_I2, int64(v.(int16))) + case *int16: + vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) + case uint16: + vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) + case *uint16: + vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) + case int32: + vargs[n] = NewVariant(VT_I4, int64(v.(int32))) + case *int32: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) + case uint32: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) + case *uint32: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) + case int64: + vargs[n] = NewVariant(VT_I8, int64(v.(int64))) + case *int64: + vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) + case uint64: + vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) + case *uint64: + vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) + case int: + vargs[n] = NewVariant(VT_I4, int64(v.(int))) + case *int: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) + case uint: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) + case *uint: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) + case float32: + vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) + case *float32: + vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) + case float64: + vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) + case *float64: + vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case *big.Int: + vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) + case string: + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) + case *string: + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) + case time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) + case *time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) + case *IDispatch: + vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) + case **IDispatch: + vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) + case nil: + vargs[n] = NewVariant(VT_NULL, 0) + case *VARIANT: + vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) + case []byte: + safeByteArray := safeArrayFromByteSlice(v.([]byte)) + vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + default: + panic("unknown type") + } + } + dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.cArgs = uint32(len(params)) + } + + result = new(VARIANT) + var excepInfo EXCEPINFO + VariantInit(result) + hr, _, _ := syscall.Syscall9( + disp.VTable().Invoke, + 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(GetUserDefaultLCID()), + 
uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(result)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + excepInfo.renderStrings() + excepInfo.Clear() + err = NewErrorWithSubError(hr, excepInfo.description, excepInfo) + } + for i, varg := range vargs { + n := len(params) - i - 1 + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { + *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) + } + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go new file mode 100644 index 00000000..24338975 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go @@ -0,0 +1,19 @@ +package ole + +import "unsafe" + +type IEnumVARIANT struct { + IUnknown +} + +type IEnumVARIANTVtbl struct { + IUnknownVtbl + Next uintptr + Skip uintptr + Reset uintptr + Clone uintptr +} + +func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { + return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go new file mode 100644 index 00000000..c1484819 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { + return nil, NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Reset() error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Skip(celt uint) error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { + return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go new file mode 100644 index 00000000..4781f3b8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go @@ -0,0 +1,63 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Clone, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(unsafe.Pointer(&cloned)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Reset() (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Reset, + 1, + uintptr(unsafe.Pointer(enum)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Skip(celt uint) (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Skip, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { + hr, _, _ := syscall.Syscall6( + enum.VTable().Next, + 4, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + uintptr(unsafe.Pointer(&array)), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go new file mode 100644 index 00000000..f4a19e25 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable.go @@ -0,0 +1,18 @@ +package ole + +import "unsafe" + +type IInspectable struct { + IUnknown +} + +type IInspectableVtbl struct { + 
IUnknownVtbl + GetIIds uintptr + GetRuntimeClassName uintptr + GetTrustLevel uintptr +} + +func (v *IInspectable) VTable() *IInspectableVtbl { + return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go new file mode 100644 index 00000000..348829bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go @@ -0,0 +1,15 @@ +// +build !windows + +package ole + +func (v *IInspectable) GetIids() ([]*GUID, error) { + return []*GUID{}, NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetRuntimeClassName() (string, error) { + return "", NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetTrustLevel() (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go new file mode 100644 index 00000000..4519a4aa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package ole + +import ( + "bytes" + "encoding/binary" + "reflect" + "syscall" + "unsafe" +) + +func (v *IInspectable) GetIids() (iids []*GUID, err error) { + var count uint32 + var array uintptr + hr, _, _ := syscall.Syscall( + v.VTable().GetIIds, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&count)), + uintptr(unsafe.Pointer(&array))) + if hr != 0 { + err = NewError(hr) + return + } + defer CoTaskMemFree(array) + + iids = make([]*GUID, count) + byteCount := count * uint32(unsafe.Sizeof(GUID{})) + slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} + byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) + reader := bytes.NewReader(byteSlice) + for i := range iids { + guid := GUID{} + err = binary.Read(reader, binary.LittleEndian, &guid) + if err != nil { + return + } + iids[i] = &guid + } + return +} + +func (v *IInspectable) GetRuntimeClassName() (s string, err error) { + var hstring HString + hr, _, _ := syscall.Syscall( + v.VTable().GetRuntimeClassName, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&hstring)), + 0) + if hr != 0 { + err = NewError(hr) + return + } + s = hstring.String() + DeleteHString(hstring) + return +} + +func (v *IInspectable) GetTrustLevel() (level uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().GetTrustLevel, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&level)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go new file mode 100644 index 00000000..25f3a6f2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go @@ -0,0 +1,21 @@ +package ole + +import "unsafe" + +type IProvideClassInfo struct { + IUnknown +} + +type IProvideClassInfoVtbl struct { + IUnknownVtbl + GetClassInfo uintptr +} + +func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { + return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { + cinfo, err = getClassInfo(v) + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go new file mode 100644 index 00000000..7e3cb63e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func getClassInfo(disp 
*IProvideClassInfo) (tinfo *ITypeInfo, err error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go new file mode 100644 index 00000000..2ad01639 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetClassInfo, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&tinfo)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go new file mode 100644 index 00000000..dd3c5e21 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go @@ -0,0 +1,34 @@ +package ole + +import "unsafe" + +type ITypeInfo struct { + IUnknown +} + +type ITypeInfoVtbl struct { + IUnknownVtbl + GetTypeAttr uintptr + GetTypeComp uintptr + GetFuncDesc uintptr + GetVarDesc uintptr + GetNames uintptr + GetRefTypeOfImplType uintptr + GetImplTypeFlags uintptr + GetIDsOfNames uintptr + Invoke uintptr + GetDocumentation uintptr + GetDllEntry uintptr + GetRefTypeInfo uintptr + AddressOfMember uintptr + CreateInstance uintptr + GetMops uintptr + GetContainingTypeLib uintptr + ReleaseTypeAttr uintptr + ReleaseFuncDesc uintptr + ReleaseVarDesc uintptr +} + +func (v *ITypeInfo) VTable() *ITypeInfoVtbl { + return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go new file mode 100644 index 00000000..8364a659 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go new file mode 100644 index 00000000..54782b3d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { + hr, _, _ := syscall.Syscall( + uintptr(v.VTable().GetTypeAttr), + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&tattr)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go new file mode 100644 index 00000000..108f28ea --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown.go @@ -0,0 +1,57 @@ +package ole + +import "unsafe" + +type IUnknown struct { + RawVTable *interface{} +} + +type IUnknownVtbl struct { + QueryInterface uintptr + AddRef uintptr + Release uintptr +} + +type UnknownLike interface { + QueryInterface(iid *GUID) (disp *IDispatch, err error) + AddRef() int32 + Release() int32 +} + +func (v *IUnknown) VTable() *IUnknownVtbl { + return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = v.PutQueryInterface(interfaceID, &dispatch) + return +} + 
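These helpers compose: PutQueryInterface drives the raw QueryInterface vtable slot (via reflection on Windows), and typed wrappers such as IDispatch above and IEnumVARIANT below cast the result. A minimal sketch of walking a COM collection with them, the same steps the vendored oleutil.ForEach later in this diff performs (assumes `items` is an *ole.IDispatch for some collection object; error handling elided):

    // Every automation collection exposes its enumerator as _NewEnum.
    newEnum, _ := items.GetProperty("_NewEnum")
    defer newEnum.Clear()

    // Cast the enumerator's IUnknown to IEnumVARIANT, then pull one item at a time.
    enum, _ := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
    defer enum.Release()

    for item, n, _ := enum.Next(1); n > 0; item, n, _ = enum.Next(1) {
        fmt.Println(item.ToString())
        item.Clear()
    }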
+func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { + err = v.PutQueryInterface(interfaceID, &enum) + return +} + +func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { + return queryInterface(v, iid) +} + +func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { + unk, err := queryInterface(v, iid) + if err != nil { + panic(err) + } + return unk +} + +func (v *IUnknown) AddRef() int32 { + return addRef(v) +} + +func (v *IUnknown) Release() int32 { + return release(v) +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go new file mode 100644 index 00000000..d0a62cfd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + return nil, NewError(E_NOTIMPL) +} + +func addRef(unk *IUnknown) int32 { + return 0 +} + +func release(unk *IUnknown) int32 { + return 0 +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go new file mode 100644 index 00000000..ede5bb8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unsafe" +) + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + selfValue := reflect.ValueOf(self).Elem() + objValue := reflect.ValueOf(obj).Elem() + + hr, _, _ := syscall.Syscall( + method, + 3, + selfValue.UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + objValue.Addr().Pointer()) + if hr != 0 { + err = NewError(hr) + } + return +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + hr, _, _ := syscall.Syscall( + unk.VTable().QueryInterface, + 3, + uintptr(unsafe.Pointer(unk)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func addRef(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().AddRef, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} + +func release(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().Release, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go new file mode 100644 index 00000000..dbd132bb --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ole.go @@ -0,0 +1,190 @@ +package ole + +import ( + "fmt" + "strings" + "unsafe" +) + +// DISPPARAMS are the arguments that passed to methods or property. +type DISPPARAMS struct { + rgvarg uintptr + rgdispidNamedArgs uintptr + cArgs uint32 + cNamedArgs uint32 +} + +// EXCEPINFO defines exception info. +type EXCEPINFO struct { + wCode uint16 + wReserved uint16 + bstrSource *uint16 + bstrDescription *uint16 + bstrHelpFile *uint16 + dwHelpContext uint32 + pvReserved uintptr + pfnDeferredFillIn uintptr + scode uint32 + + // Go-specific part. Don't move upper cos it'll break structure layout for native code. + rendered bool + source string + description string + helpFile string +} + +// renderStrings translates BSTR strings to Go ones so `.Error` and `.String` +// could be safely called after `.Clear`. 
We need this when we can't rely on +// a caller to call `.Clear`. +func (e *EXCEPINFO) renderStrings() { + e.rendered = true + if e.bstrSource == nil { + e.source = "" + } else { + e.source = BstrToString(e.bstrSource) + } + if e.bstrDescription == nil { + e.description = "" + } else { + e.description = BstrToString(e.bstrDescription) + } + if e.bstrHelpFile == nil { + e.helpFile = "" + } else { + e.helpFile = BstrToString(e.bstrHelpFile) + } +} + +// Clear frees BSTR strings inside an EXCEPINFO and set it to NULL. +func (e *EXCEPINFO) Clear() { + freeBSTR := func(s *uint16) { + // SysFreeString don't return errors and is safe for call's on NULL. + // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring + _ = SysFreeString((*int16)(unsafe.Pointer(s))) + } + + if e.bstrSource != nil { + freeBSTR(e.bstrSource) + e.bstrSource = nil + } + if e.bstrDescription != nil { + freeBSTR(e.bstrDescription) + e.bstrDescription = nil + } + if e.bstrHelpFile != nil { + freeBSTR(e.bstrHelpFile) + e.bstrHelpFile = nil + } +} + +// WCode return wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE return scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + +// String convert EXCEPINFO to string. +func (e EXCEPINFO) String() string { + if !e.rendered { + e.renderStrings() + } + return fmt.Sprintf( + "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", + e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode, + ) +} + +// Error implements error interface and returns error string. +func (e EXCEPINFO) Error() string { + if !e.rendered { + e.renderStrings() + } + + if e.description != "" { + return strings.TrimSpace(e.description) + } + + code := e.scode + if e.wCode != 0 { + code = uint32(e.wCode) + } + return fmt.Sprintf("%v: %#x", e.source, code) +} + +// PARAMDATA defines parameter data type. +type PARAMDATA struct { + Name *int16 + Vt uint16 +} + +// METHODDATA defines method info. +type METHODDATA struct { + Name *uint16 + Data *PARAMDATA + Dispid int32 + Meth uint32 + CC int32 + CArgs uint32 + Flags uint16 + VtReturn uint32 +} + +// INTERFACEDATA defines interface info. +type INTERFACEDATA struct { + MethodData *METHODDATA + CMembers uint32 +} + +// Point is 2D vector type. +type Point struct { + X int32 + Y int32 +} + +// Msg is message between processes. +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +// TYPEDESC defines data type. +type TYPEDESC struct { + Hreftype uint32 + VT uint16 +} + +// IDLDESC defines IDL info. +type IDLDESC struct { + DwReserved uint32 + WIDLFlags uint16 +} + +// TYPEATTR defines type info. 
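Before the TYPEATTR layout that the comment above introduces, a note on how EXCEPINFO reaches callers: when Invoke fails on Windows, the rendered exception is wrapped into the returned error through NewErrorWithSubError. A minimal sketch of unpacking it (assumes `disp` is an *ole.IDispatch, "Save" is an illustrative method name, and OleError/SubError come from the library's error files, which are outside this hunk):

    if _, err := disp.CallMethod("Save"); err != nil {
        if oleErr, ok := err.(*ole.OleError); ok {
            // SubError carries the EXCEPINFO captured by invoke().
            if excep, ok := oleErr.SubError().(ole.EXCEPINFO); ok {
                log.Printf("COM exception: wCode=%#x scode=%#x: %s",
                    excep.WCode(), excep.SCODE(), excep.Error())
            }
        }
    }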
+type TYPEATTR struct { + Guid GUID + Lcid uint32 + dwReserved uint32 + MemidConstructor int32 + MemidDestructor int32 + LpstrSchema *uint16 + CbSizeInstance uint32 + Typekind int32 + CFuncs uint16 + CVars uint16 + CImplTypes uint16 + CbSizeVft uint16 + CbAlignment uint16 + WTypeFlags uint16 + WMajorVerNum uint16 + WMinorVerNum uint16 + TdescAlias TYPEDESC + IdldescType IDLDESC +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go new file mode 100644 index 00000000..60df73cd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go @@ -0,0 +1,100 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +type stdDispatch struct { + lpVtbl *stdDispatchVtbl + ref int32 + iid *ole.GUID + iface interface{} + funcMap map[string]int32 +} + +type stdDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + *punk = nil + if ole.IsEqualGUID(iid, ole.IID_IUnknown) || + ole.IsEqualGUID(iid, ole.IID_IDispatch) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + if ole.IsEqualGUID(iid, pthis.iid) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + return ole.E_NOINTERFACE +} + +func dispAddRef(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref++ + return pthis.ref +} + +func dispRelease(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref-- + return pthis.ref +} + +func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + names := make([]string, len(wnames)) + for i := 0; i < len(names); i++ { + names[i] = ole.LpOleStrToString(wnames[i]) + } + for n := 0; n < namelen; n++ { + if id, ok := pthis.funcMap[names[n]]; ok { + pdisp[n] = id + } + } + return ole.S_OK +} + +func dispGetTypeInfoCount(pcount *int) uintptr { + if pcount != nil { + *pcount = 0 + } + return ole.S_OK +} + +func dispGetTypeInfo(ptypeif *uintptr) uintptr { + return ole.E_NOTIMPL +} + +func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + found := "" + for name, id := range pthis.funcMap { + if id == dispid { + found = name + } + } + if found != "" { + rv := reflect.ValueOf(pthis.iface).Elem() + rm := rv.MethodByName(found) + rr := rm.Call([]reflect.Value{}) + println(len(rr)) + return ole.S_OK + } + return ole.E_NOTIMPL +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go new file mode 100644 index 00000000..8818fb82 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go @@ -0,0 +1,10 @@ +// +build !windows + +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ConnectObject creates a connection point between two services for communication. 
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { + return 0, ole.NewError(ole.E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go new file mode 100644 index 00000000..ab9c0d8d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "syscall" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +// ConnectObject creates a connection point between two services for communication. +func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { + unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) + if err != nil { + return + } + + container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) + var point *ole.IConnectionPoint + err = container.FindConnectionPoint(iid, &point) + if err != nil { + return + } + if edisp, ok := idisp.(*ole.IUnknown); ok { + cookie, err = point.Advise(edisp) + container.Release() + if err != nil { + return + } + } + rv := reflect.ValueOf(disp).Elem() + if rv.Type().Kind() == reflect.Struct { + dest := &stdDispatch{} + dest.lpVtbl = &stdDispatchVtbl{} + dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) + dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) + dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) + dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) + dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) + dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) + dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) + dest.iface = disp + dest.iid = iid + cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) + container.Release() + if err != nil { + point.Release() + return + } + return + } + + container.Release() + + return 0, ole.NewError(ole.E_INVALIDARG) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go new file mode 100644 index 00000000..58347628 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go @@ -0,0 +1,6 @@ +// This file is here so go get succeeds as without it errors with: +// no buildable Go source files in ... +// +// +build !windows + +package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go new file mode 100644 index 00000000..f7803c1e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -0,0 +1,127 @@ +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +func ClassIDFrom(programID string) (classID *ole.GUID, err error) { + return ole.ClassIDFrom(programID) +} + +// CreateObject creates object from programID based on interface type. +// +// Only supports IUnknown. +// +// Program ID can be either program ID or application string. +func CreateObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// GetActiveObject retrieves active object for program ID and interface ID based +// on interface type. +// +// Only supports IUnknown. 
+// +// Program ID can be either program ID or application string. +func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// CallMethod calls method on IDispatch with parameters. +func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) +} + +// MustCallMethod calls method on IDispatch with parameters or panics. +func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := CallMethod(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// GetProperty retrieves property from IDispatch. +func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) +} + +// MustGetProperty retrieves property from IDispatch or panics. +func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := GetProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutProperty mutates property. +func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) +} + +// MustPutProperty mutates property or panics. +func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { + newEnum, err := disp.GetProperty("_NewEnum") + if err != nil { + return err + } + defer newEnum.Clear() + + enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + defer enum.Release() + + for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { + if err != nil { + return err + } + if ferr := f(&item); ferr != nil { + return ferr + } + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go new file mode 100644 index 00000000..a5201b56 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray.go @@ -0,0 +1,27 @@ +// Package is meant to retrieve and process safe array data returned from COM. + +package ole + +// SafeArrayBound defines the SafeArray boundaries. +type SafeArrayBound struct { + Elements uint32 + LowerBound int32 +} + +// SafeArray is how COM handles arrays. 
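Application code rarely touches the SafeArray struct defined below directly: a VARIANT carrying VT_ARRAY data is normally unpacked through the SafeArrayConversion helper vendored later in this diff. A minimal sketch, assuming `result` is a *ole.VARIANT returned by some Invoke call whose value is a SAFEARRAY of BSTRs:

    arr := result.ToArray() // *ole.SafeArrayConversion; nil if the VARIANT holds no array
    if arr != nil {
        names := arr.ToStringArray()
        fmt.Println(strings.Join(names, ", "))
        arr.Release() // destroys the underlying SAFEARRAY descriptor
    }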
+type SafeArray struct { + Dimensions uint16 + FeaturesFlag uint16 + ElementsSize uint32 + LocksAmount uint32 + Data uint32 + Bounds [16]byte +} + +// SAFEARRAY is obsolete, exists for backwards compatibility. +// Use SafeArray +type SAFEARRAY SafeArray + +// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. +// Use SafeArrayBound +type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go new file mode 100644 index 00000000..0dee670c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -0,0 +1,211 @@ +// +build !windows + +package ole + +import ( + "unsafe" +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { + return uintptr(0), NewError(E_NOTIMPL) +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { + return uint16(0), NewError(E_NOTIMPL) +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go new file mode 100644 index 00000000..0c1b3a10 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -0,0 +1,337 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +var ( + procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData") + procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy") + procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData") + procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate") + procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx") + procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy") + procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim") + procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement") + procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize") + procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID") + procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound") + procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound") + procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype") + procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock") + procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData") + procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock") + procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement") + //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO + //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo") +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +// Todo: Test +func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { + err = convertHresultToError( + procSafeArrayAccessData.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&element)))) + return +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. 
+func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptorEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayCopy.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { + err = convertHresultToError( + procSafeArrayCopyData.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(duplicate)))) + return +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreate.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds))) + safearray = (*SafeArray)(unsafe.Pointer(&sa)) + return +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds)), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVector.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length)) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVectorEx.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyDescriptor destroys SafeArray object. +// +// AKA: SafeArrayDestroyDescriptor in Windows API. 
+func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { + l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) + dimensions = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { + l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) + length = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(pv))) +} + +// safeArrayGetElementString retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { + var element *int16 + err = convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(&element)))) + str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) + SysFreeString(element) + return +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { + err = convertHresultToError( + procSafeArrayGetIID.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&guid)))) + return +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetLBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&lowerBound)))) + return +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetUBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&upperBound)))) + return +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { + err = convertHresultToError( + procSafeArrayGetVartype.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&varType)))) + return +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. 
+// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { + err = convertHresultToError( + procSafeArrayPutElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(element)))) + return +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { + err = convertHresultToError( + procSafeArrayGetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { + err = convertHresultToError( + procSafeArraySetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go new file mode 100644 index 00000000..da737293 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -0,0 +1,140 @@ +// Helper for converting SafeArray to array of objects. 
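+// +// SafeArrayConversion wraps a raw *SafeArray and provides typed accessors (ToStringArray, ToByteArray, ToValueArray) along with helpers for querying the array's type, dimensions and element size.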
+ +package ole + +import ( + "unsafe" +) + +type SafeArrayConversion struct { + Array *SafeArray +} + +func (sac *SafeArrayConversion) ToStringArray() (strings []string) { + totalElements, _ := sac.TotalElements(0) + strings = make([]string, totalElements) + + for i := int32(0); i < totalElements; i++ { + strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) + } + + return +} + +func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { + totalElements, _ := sac.TotalElements(0) + bytes = make([]byte, totalElements) + + for i := int32(0); i < totalElements; i++ { + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) + } + + return +} + +func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { + totalElements, _ := sac.TotalElements(0) + values = make([]interface{}, totalElements) + vt, _ := safeArrayGetVartype(sac.Array) + + for i := int32(0); i < totalElements; i++ { + switch VT(vt) { + case VT_BOOL: + var v bool + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I1: + var v int8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I2: + var v int16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I4: + var v int32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I8: + var v int64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI1: + var v uint8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI2: + var v uint16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI4: + var v uint32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI8: + var v uint64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R4: + var v float32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R8: + var v float64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_BSTR: + v , _ := safeArrayGetElementString(sac.Array, i) + values[i] = v + case VT_VARIANT: + var v VARIANT + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v.Value() + v.Clear() + default: + // TODO + } + } + + return +} + +func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { + return safeArrayGetVartype(sac.Array) +} + +func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { + return safeArrayGetDim(sac.Array) +} + +func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { + return safeArrayGetElementSize(sac.Array) +} + +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { + if index < 1 { + index = 1 + } + + // Get array bounds + var LowerBounds int32 + var UpperBounds int32 + + LowerBounds, err = safeArrayGetLBound(sac.Array, index) + if err != nil { + return + } + + UpperBounds, err = safeArrayGetUBound(sac.Array, index) + if err != nil { + return + } + + totalElements = UpperBounds - LowerBounds + 1 + return +} + +// Release Safe Array memory +func (sac *SafeArrayConversion) Release() { + safeArrayDestroy(sac.Array) +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go new file mode 100644 index 00000000..a9fa885f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go @@ -0,0 +1,33 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +func 
safeArrayFromByteSlice(slice []byte) *SafeArray { + array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []byte to SAFEARRAY") + } + + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) + } + return array +} + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go new file mode 100644 index 00000000..99ee82dc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/utility.go @@ -0,0 +1,101 @@ +package ole + +import ( + "unicode/utf16" + "unsafe" +) + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +// +// Helper that provides check against both Class ID from Program ID and Class ID from string. It is +// faster, if you know which you are using, to use the individual functions, but this will check +// against available functions for you. +func ClassIDFrom(programID string) (classID *GUID, err error) { + classID, err = CLSIDFromProgID(programID) + if err != nil { + classID, err = CLSIDFromString(programID) + if err != nil { + return + } + } + return +} + +// BytePtrToString converts byte pointer to a Go string. +func BytePtrToString(p *byte) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// UTF16PtrToString is alias for LpOleStrToString. +// +// Kept for compatibility reasons. +func UTF16PtrToString(p *uint16) string { + return LpOleStrToString(p) +} + +// LpOleStrToString converts COM Unicode to Go string. +func LpOleStrToString(p *uint16) string { + if p == nil { + return "" + } + + length := lpOleStrLen(p) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + + return string(utf16.Decode(a)) +} + +// BstrToString converts COM binary string to Go string. +func BstrToString(p *uint16) string { + if p == nil { + return "" + } + length := SysStringLen((*int16)(unsafe.Pointer(p))) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return string(utf16.Decode(a)) +} + +// lpOleStrLen returns the length of Unicode string. +func lpOleStrLen(p *uint16) (length int64) { + if p == nil { + return 0 + } + + ptr := unsafe.Pointer(p) + + for i := 0; ; i++ { + if 0 == *(*uint16)(ptr) { + length = int64(i) + break + } + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return +} + +// convertHresultToError converts syscall to error, if call is unsuccessful. 
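+// +// The parameters mirror the three return values of a LazyProc.Call so that call results can be passed through directly; only the first value (the HRESULT) is inspected.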
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go new file mode 100644 index 00000000..a6add1b0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variables.go @@ -0,0 +1,15 @@ +// +build windows + +package ole + +import ( + "golang.org/x/sys/windows" +) + +var ( + modcombase = windows.NewLazySystemDLL("combase.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modole32 = windows.NewLazySystemDLL("ole32.dll") + modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll") + moduser32 = windows.NewLazySystemDLL("user32.dll") +) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go new file mode 100644 index 00000000..967a23fe --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -0,0 +1,105 @@ +package ole + +import "unsafe" + +// NewVariant returns new variant based on type and value. +func NewVariant(vt VT, val int64) VARIANT { + return VARIANT{VT: vt, Val: val} +} + +// ToIUnknown converts Variant to Unknown object. +func (v *VARIANT) ToIUnknown() *IUnknown { + if v.VT != VT_UNKNOWN { + return nil + } + return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToIDispatch converts variant to dispatch object. +func (v *VARIANT) ToIDispatch() *IDispatch { + if v.VT != VT_DISPATCH { + return nil + } + return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToArray converts variant to SafeArray helper. +func (v *VARIANT) ToArray() *SafeArrayConversion { + if v.VT != VT_SAFEARRAY { + if v.VT&VT_ARRAY == 0 { + return nil + } + } + var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) + return &SafeArrayConversion{safeArray} +} + +// ToString converts variant to Go string. +func (v *VARIANT) ToString() string { + if v.VT != VT_BSTR { + return "" + } + return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) +} + +// Clear the memory of variant object. +func (v *VARIANT) Clear() error { + return VariantClear(v) +} + +// Value returns variant value based on its type. +// +// Currently supported types: 2- and 4-byte integers, strings, bools. +// Note that 64-bit integers, datetimes, and other types are stored as strings +// and will be returned as strings. +// +// Needs to be further converted, because this returns an interface{}. +func (v *VARIANT) Value() interface{} { + switch v.VT { + case VT_I1: + return int8(v.Val) + case VT_UI1: + return uint8(v.Val) + case VT_I2: + return int16(v.Val) + case VT_UI2: + return uint16(v.Val) + case VT_I4: + return int32(v.Val) + case VT_UI4: + return uint32(v.Val) + case VT_I8: + return int64(v.Val) + case VT_UI8: + return uint64(v.Val) + case VT_INT: + return int(v.Val) + case VT_UINT: + return uint(v.Val) + case VT_INT_PTR: + return uintptr(v.Val) // TODO + case VT_UINT_PTR: + return uintptr(v.Val) + case VT_R4: + return *(*float32)(unsafe.Pointer(&v.Val)) + case VT_R8: + return *(*float64)(unsafe.Pointer(&v.Val)) + case VT_BSTR: + return v.ToString() + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
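+ // The raw bits of Val encode an OLE DATE; if the conversion fails, fall back to a plain numeric conversion of Val.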
+ d := uint64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return float64(v.Val) + } + return date + case VT_UNKNOWN: + return v.ToIUnknown() + case VT_DISPATCH: + return v.ToIDispatch() + case VT_BOOL: + return v.Val != 0 + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go new file mode 100644 index 00000000..e73736bf --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_386.go @@ -0,0 +1,11 @@ +// +build 386 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go new file mode 100644 index 00000000..dccdde13 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go @@ -0,0 +1,12 @@ +// +build amd64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go new file mode 100644 index 00000000..d4724544 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm.go @@ -0,0 +1,11 @@ +// +build arm + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go new file mode 100644 index 00000000..78473cec --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm64.go @@ -0,0 +1,13 @@ +//go:build arm64 +// +build arm64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go new file mode 100644 index 00000000..1b970f63 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_386.go @@ -0,0 +1,22 @@ +// +build windows,386 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go new file mode 100644 index 00000000..6952f1f0 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go @@ -0,0 +1,20 @@ +// +build windows,amd64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
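+// +// On amd64 the 64-bit DATE value is passed to VariantTimeToSystemTime as a single uintptr argument; the 32-bit 386 and arm builds split it into two uint32 halves instead.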
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go new file mode 100644 index 00000000..09ec7b5c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm.go @@ -0,0 +1,22 @@ +// +build windows,arm + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go new file mode 100644 index 00000000..02b04a0d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go @@ -0,0 +1,23 @@ +//go:build windows && arm64 +// +build windows,arm64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
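+// +// The DATE bits are split into two uint32 halves here as well, mirroring the 32-bit implementations.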
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go new file mode 100644 index 00000000..326427a7 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 00000000..9874ca66 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go new file mode 100644 index 00000000..729b4a04 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/vt_string.go @@ -0,0 +1,58 @@ +// generated by stringer -output vt_string.go -type VT; DO NOT EDIT + +package ole + +import "fmt" + +const ( + _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" + _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" + _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" + _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" + _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" + _VT_name_5 = "VT_ARRAY" + _VT_name_6 = "VT_BYREF" + _VT_name_7 = "VT_RESERVED" + _VT_name_8 = "VT_ILLEGAL" +) + +var ( + _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} + _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} + _VT_index_2 = [...]uint8{0, 9, 19, 30} + _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} + _VT_index_4 = [...]uint8{0, 12, 21} + _VT_index_5 = [...]uint8{0, 8} + _VT_index_6 = [...]uint8{0, 8} + _VT_index_7 = [...]uint8{0, 11} + _VT_index_8 = [...]uint8{0, 10} +) + +func (i VT) String() string { + switch { + case 0 <= i && i <= 14: + return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] + case 16 <= i && i <= 31: + i -= 16 + return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] + case 36 <= i && i <= 38: + i -= 36 + return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] + case 64 <= i && i <= 72: + i -= 64 + return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] + case 4095 <= i && i <= 4096: + i -= 4095 + return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] + case i == 8192: + return _VT_name_5 + case i == 16384: + return _VT_name_6 + case i == 32768: + return _VT_name_7 + case i == 65535: + return _VT_name_8 + default: + return fmt.Sprintf("VT(%d)", i) + } +} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go 
b/vendor/github.com/go-ole/go-ole/winrt.go new file mode 100644 index 00000000..4e9eca73 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt.go @@ -0,0 +1,99 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unicode/utf8" + "unsafe" +) + +var ( + procRoInitialize = modcombase.NewProc("RoInitialize") + procRoActivateInstance = modcombase.NewProc("RoActivateInstance") + procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") + procWindowsCreateString = modcombase.NewProc("WindowsCreateString") + procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") + procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") +) + +func RoInitialize(thread_type uint32) (err error) { + hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoActivateInstance.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoGetActivationFactory.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + u16 := syscall.StringToUTF16Ptr(s) + len := uint32(utf8.RuneCountInString(s)) + hr, _, _ := procWindowsCreateString.Call( + uintptr(unsafe.Pointer(u16)), + uintptr(len), + uintptr(unsafe.Pointer(&hstring))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// String returns Go string value of HString. +func (h HString) String() string { + var u16buf uintptr + var u16len uint32 + u16buf, _, _ = procWindowsGetStringRawBuffer.Call( + uintptr(h), + uintptr(unsafe.Pointer(&u16len))) + + u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} + u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) + return syscall.UTF16ToString(u16) +} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go new file mode 100644 index 00000000..52e6d74c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go @@ -0,0 +1,36 @@ +// +build !windows + +package ole + +// RoInitialize +func RoInitialize(thread_type uint32) (err error) { + return NewError(E_NOTIMPL) +} + +// RoActivateInstance +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// RoGetActivationFactory +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. 
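+// +// Non-Windows stub: always fails with E_NOTIMPL.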
+func NewHString(s string) (hstring HString, err error) { + return HString(uintptr(0)), NewError(E_NOTIMPL) +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + return NewError(E_NOTIMPL) +} + +// String returns Go string value of HString. +func (h HString) String() string { + return "" +} diff --git a/vendor/github.com/jaypipes/ghw/.gitignore b/vendor/github.com/jaypipes/ghw/.gitignore new file mode 100644 index 00000000..34d0d840 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/.gitignore @@ -0,0 +1,3 @@ +vendor/ +coverage*.* +*~ diff --git a/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..a4b37714 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md @@ -0,0 +1,134 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[INSERT CONTACT METHOD]. 
+All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available +at [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations + diff --git a/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md new file mode 100644 index 00000000..b790517b --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md @@ -0,0 +1,54 @@ +# How to Contribute + +We welcome any and all contributions to `ghw`! Filing [bug reports][gh-issues], +asking questions and submitting patches are all encouraged. + +[gh-issues]: https://github.com/jaypipes/ghw/issues + +## Submitting patches via pull requests + +We use GitHub pull requests to review code submissions. 
+ +Consult [GitHub Help][pr-help] for more information on using pull requests. + +[pr-help]: https://help.github.com/articles/about-pull-requests/ + +We ask that contributors submitting a pull request sign their commits and +attest to the Developer Certificate of Origin (DCO). + +## Developer Certificate of Origin + +The DCO is a lightweight way for contributors to certify that they wrote or +otherwise have the right to submit the code they are contributing to the +project. Here is the [full text of the DCO][dco], reformatted for readability: + +> By making a contribution to this project, I certify that: +> +> a. The contribution was created in whole or in part by me and I have the +> right to submit it under the open source license indicated in the file; or +> +> b. The contribution is based upon previous work that, to the best of my +> knowledge, is covered under an appropriate open source license and I have the +> right under that license to submit that work with modifications, whether +> created in whole or in part by me, under the same open source license (unless +> I am permitted to submit under a different license), as indicated in the +> file; or +> +> c. The contribution was provided directly to me by some other person who +> certified (a), (b) or (c) and I have not modified it. +> +> d. I understand and agree that this project and the contribution are public +> and that a record of the contribution (including all personal information I +> submit with it, including my sign-off) is maintained indefinitely and may be +> redistributed consistent with this project or the open source license(s) +> involved. + +[dco]: https://developercertificate.org/ + +You can sign your commits using `git commit -s` before pushing commits to +Github and creating a pull request. + +## Community Guidelines + +1. Be kind. +2. Seriously, that's it. diff --git a/vendor/github.com/jaypipes/ghw/COPYING b/vendor/github.com/jaypipes/ghw/COPYING new file mode 100644 index 00000000..68c771a0 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/COPYING @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/vendor/github.com/jaypipes/ghw/Dockerfile b/vendor/github.com/jaypipes/ghw/Dockerfile new file mode 100644 index 00000000..ee5d2761 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.21-buster as builder +WORKDIR /go/src/github.com/jaypipes/ghw + +ENV GOPROXY=direct + +# go.mod and go.sum go into their own layers. +COPY go.mod . +COPY go.sum . + +# This ensures `go mod download` happens only when go.mod and go.sum change. +RUN go mod download + +COPY . . + +RUN CGO_ENABLED=0 go build -o ghwc ./cmd/ghwc/ + +FROM alpine:3.7@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 +RUN apk add --no-cache ethtool + +WORKDIR /bin + +COPY --from=builder /go/src/github.com/jaypipes/ghw/ghwc /bin + +CMD ghwc diff --git a/vendor/github.com/jaypipes/ghw/Makefile b/vendor/github.com/jaypipes/ghw/Makefile new file mode 100644 index 00000000..75d2bcc8 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/Makefile @@ -0,0 +1,16 @@ +.PHONY: test +test: vet + go test -v ./... + +.PHONY: fmt +fmt: + @echo "Running gofmt on all sources..." + @gofmt -s -l -w . + +.PHONY: fmtcheck +fmtcheck: + @bash -c "diff -u <(echo -n) <(gofmt -d .)" + +.PHONY: vet +vet: + go vet ./... diff --git a/vendor/github.com/jaypipes/ghw/README.md b/vendor/github.com/jaypipes/ghw/README.md new file mode 100644 index 00000000..bad7c86e --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/README.md @@ -0,0 +1,1461 @@ +# `ghw` - Go HardWare discovery/inspection library + +[![Go Reference](https://pkg.go.dev/badge/github.com/jaypipes/ghw.svg)](https://pkg.go.dev/github.com/jaypipes/ghw) +[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/ghw)](https://goreportcard.com/report/github.com/jaypipes/ghw) +[![Build Status](https://github.com/jaypipes/ghw/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/jaypipes/ghw/actions) +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md) + +![ghw mascot](images/ghw-gopher.png) + +`ghw` is a Go library providing hardware inspection and discovery for Linux and +Windows. 
There currently exists partial support for MacOSX. + +## Design Principles + +* No root privileges needed for discovery + + `ghw` goes the extra mile to be useful without root privileges. We query for + host hardware information as directly as possible without relying on shellouts + to programs like `dmidecode` that require root privileges to execute. + + Elevated privileges are indeed required to query for some information, but + `ghw` will never error out if blocked from reading that information. Instead, + `ghw` will print a warning message about the information that could not be + retrieved. You may disable these warning messages with the + `GHW_DISABLE_WARNINGS` environment variable. + +* Well-documented code and plenty of example code + + The code itself should be well-documented with lots of usage examples. + +* Interfaces should be consistent across modules + + Each module in the library should be structured in a consistent fashion, and + the structs returned by various library functions should have consistent + attribute and method names. + +## Inspecting != Monitoring + +`ghw` is a tool for gathering information about your hardware's **capacity** +and **capabilities**. + +It is important to point out that `ghw` does **NOT** report information that is +temporary or variable. It is **NOT** a system monitor nor is it an appropriate +tool for gathering data points for metrics that change over time. If you are +looking for a system that tracks **usage** of CPU, memory, network I/O or disk +I/O, there are plenty of great open source tools that do this! Check out the +[Prometheus project](https://prometheus.io/) for a great example. + +## Usage + +`ghw` has functions that return an `Info` object about a particular hardware +domain (e.g. CPU, Memory, Block storage, etc). + +Use the following functions in `ghw` to inspect information about the host +hardware: + +* [`ghw.CPU()`](#cpu) +* [`ghw.Memory()`](#memory) +* [`ghw.Block()`](#block-storage) (block storage) +* [`ghw.Topology()`](#topology) (processor architecture, NUMA topology and + memory cache hierarchy) +* [`ghw.Network()`](#network) +* [`ghw.PCI()`](#pci) +* [`ghw.GPU()`](#gpu) (graphical processing unit) +* [`ghw.Accelerator()`](#accelerator) (processing accelerators, AI) +* [`ghw.Chassis()`](#chassis) +* [`ghw.BIOS()`](#bios) +* [`ghw.Baseboard()`](#baseboard) +* [`ghw.Product()`](#product) + +### CPU + +The `ghw.CPU()` function returns a `ghw.CPUInfo` struct that contains +information about the CPUs on the host system.
+ +`ghw.CPUInfo` contains the following fields: + +* `ghw.CPUInfo.TotalCores` has the total number of physical cores the host + system contains +* `ghw.CPUInfo.TotalHardwareThreads` has the total number of hardware threads + the host system contains +* `ghw.CPUInfo.Processors` is an array of `ghw.Processor` structs, one for each + physical processor package contained in the host + +Each `ghw.Processor` struct contains a number of fields: + +* `ghw.Processor.ID` is the physical processor `uint32` ID according to the + system +* `ghw.Processor.TotalCores` is the number of physical cores in the processor + package +* `ghw.Processor.TotalHardwareThreads` is the number of hardware threads in the + processor package +* `ghw.Processor.Vendor` is a string containing the vendor name +* `ghw.Processor.Model` is a string containing the vendor's model name +* `ghw.Processor.Capabilities` (Linux only) is an array of strings indicating + the features the processor has enabled +* `ghw.Processor.Cores` (Linux only) is an array of `ghw.ProcessorCore` structs + that are packed onto this physical processor + +A `ghw.ProcessorCore` has the following fields: + +* `ghw.ProcessorCore.ID` is the `uint32` identifier that the host gave this + core. Note that this does *not* necessarily equate to a zero-based index of + the core within a physical package. For example, the core IDs for an Intel Core + i7 are 0, 1, 2, 8, 9, and 10 +* `ghw.ProcessorCore.TotalHardwareThreads` is the number of hardware threads + associated with the core +* `ghw.ProcessorCore.LogicalProcessors` is an array of ints representing the + logical processor IDs assigned to any processing unit for the core. These are + sometimes called the "thread siblings". Logical processor IDs are the + *zero-based* index of the processor on the host and are *not* related to the + core ID. 
+ +```go +package main + +import ( + "fmt" + "math" + "strings" + + "github.com/jaypipes/ghw" +) + +func main() { + cpu, err := ghw.CPU() + if err != nil { + fmt.Printf("Error getting CPU info: %v", err) + } + + fmt.Printf("%v\n", cpu) + + for _, proc := range cpu.Processors { + fmt.Printf(" %v\n", proc) + for _, core := range proc.Cores { + fmt.Printf(" %v\n", core) + } + if len(proc.Capabilities) > 0 { + // pretty-print the (large) block of capability strings into rows + // of 6 capability strings + rows := int(math.Ceil(float64(len(proc.Capabilities)) / float64(6))) + for row := 1; row < rows; row = row + 1 { + rowStart := (row * 6) - 1 + rowEnd := int(math.Min(float64(rowStart+6), float64(len(proc.Capabilities)))) + rowElems := proc.Capabilities[rowStart:rowEnd] + capStr := strings.Join(rowElems, " ") + if row == 1 { + fmt.Printf(" capabilities: [%s\n", capStr) + } else if rowEnd < len(proc.Capabilities) { + fmt.Printf(" %s\n", capStr) + } else { + fmt.Printf(" %s]\n", capStr) + } + } + } + } +} +``` + +Example output from my personal workstation: + +``` +cpu (1 physical package, 6 cores, 12 hardware threads) + physical package #0 (6 cores, 12 hardware threads) + processor core #0 (2 threads), logical processors [0 6] + processor core #1 (2 threads), logical processors [1 7] + processor core #2 (2 threads), logical processors [2 8] + processor core #3 (2 threads), logical processors [3 9] + processor core #4 (2 threads), logical processors [4 10] + processor core #5 (2 threads), logical processors [5 11] + capabilities: [msr pae mce cx8 apic sep + mtrr pge mca cmov pat pse36 + clflush dts acpi mmx fxsr sse + sse2 ss ht tm pbe syscall + nx pdpe1gb rdtscp lm constant_tsc arch_perfmon + pebs bts rep_good nopl xtopology nonstop_tsc + cpuid aperfmperf pni pclmulqdq dtes64 monitor + ds_cpl vmx est tm2 ssse3 cx16 + xtpr pdcm pcid sse4_1 sse4_2 popcnt + aes lahf_lm pti retpoline tpr_shadow vnmi + flexpriority ept vpid dtherm ida arat] +``` + +### Memory + +The `ghw.Memory()` function returns a `ghw.MemoryInfo` struct that contains +information about the RAM on the host system. + +`ghw.MemoryInfo` contains the following fields: + +* `ghw.MemoryInfo.TotalPhysicalBytes` contains the amount of physical memory on + the host +* `ghw.MemoryInfo.TotalUsableBytes` contains the amount of memory the + system can actually use. Usable memory accounts for things like the kernel's + resident memory size and some reserved system bits. Please note this value is + **NOT** the amount of memory currently in use by processes in the system. See + [the discussion][#physical-versus-usage-memory] about the difference. +* `ghw.MemoryInfo.SupportedPageSizes` is an array of integers representing the + size, in bytes, of memory pages the system supports +* `ghw.MemoryInfo.Modules` is an array of pointers to `ghw.MemoryModule` + structs, one for each physical [DIMM](https://en.wikipedia.org/wiki/DIMM). + Currently, this information is only included on Windows, with Linux support + [planned](https://github.com/jaypipes/ghw/pull/171#issuecomment-597082409). 
+ +```go +package main + +import ( + "fmt" + + "github.com/jaypipes/ghw" +) + +func main() { + memory, err := ghw.Memory() + if err != nil { + fmt.Printf("Error getting memory info: %v", err) + } + + fmt.Println(memory.String()) +} +``` + +Example output from my personal workstation: + +``` +memory (24GB physical, 24GB usable) +``` + +#### Physical versus Usable Memory + +There has been [some](https://github.com/jaypipes/ghw/pull/171) +[confusion](https://github.com/jaypipes/ghw/issues/183) regarding the +difference between the total physical bytes versus total usable bytes of +memory. + +Some of this confusion has been due to a misunderstanding of the term "usable". +As mentioned [above](#inspection!=monitoring), `ghw` does inspection of the +system's capacity. + +A host computer has two capacities when it comes to RAM. The first capacity is +the amount of RAM that is contained in all memory banks (DIMMs) that are +attached to the motherboard. `ghw.MemoryInfo.TotalPhysicalBytes` refers to this +first capacity. + +There is a (usually small) amount of RAM that is consumed by the bootloader +before the operating system is started (booted). Once the bootloader has booted +the operating system, the amount of RAM that may be used by the operating +system and its applications is fixed. `ghw.MemoryInfo.TotalUsableBytes` refers +to this second capacity. + +You can determine the amount of RAM that the bootloader used (that is not made +available to the operating system) by subtracting +`ghw.MemoryInfo.TotalUsableBytes` from `ghw.MemoryInfo.TotalPhysicalBytes`: + +```go +package main + +import ( + "fmt" + + "github.com/jaypipes/ghw" +) + +func main() { + memory, err := ghw.Memory() + if err != nil { + fmt.Printf("Error getting memory info: %v", err) + } + + phys := memory.TotalPhysicalBytes + usable := memory.TotalUsableBytes + + fmt.Printf("The bootloader consumes %d bytes of RAM\n", phys - usable) +} +``` + +Example output from my personal workstation booted into a Windows10 operating +system with a Linux GRUB bootloader: + +``` +The bootloader consumes 3832720 bytes of RAM +``` + +### Block storage + +The `ghw.Block()` function returns a `ghw.BlockInfo` struct that contains +information about the block storage on the host system. + +`ghw.BlockInfo` contains the following fields: + +* `ghw.BlockInfo.TotalSizeBytes` contains the amount of physical block storage + on the host. +* `ghw.BlockInfo.Disks` is an array of pointers to `ghw.Disk` structs, one for + each disk found by the system + +Each `ghw.Disk` struct contains the following fields: + +* `ghw.Disk.Name` contains a string with the short name of the disk, e.g. "sda" +* `ghw.Disk.SizeBytes` contains the amount of storage the disk provides +* `ghw.Disk.PhysicalBlockSizeBytes` contains the size of the physical blocks + used on the disk, in bytes. This is typically the minimum amount of data that + will be written in a single write operation for the disk. +* `ghw.Disk.IsRemovable` contains a boolean indicating if the disk drive is + removable +* `ghw.Disk.DriveType` is the type of drive. It is of type `ghw.DriveType` + which has a `ghw.DriveType.String()` method that can be called to return a + string representation of the bus. This string will be `HDD`, `FDD`, `ODD`, + or `SSD`, which correspond to a hard disk drive (rotational), floppy drive, + optical (CD/DVD) drive and solid-state drive. +* `ghw.Disk.StorageController` is the type of storage controller. 
It is of type + `ghw.StorageController` which has a `ghw.StorageController.String()` method + that can be called to return a string representation of the bus. This string + will be `SCSI`, `IDE`, `virtio`, `MMC`, or `NVMe` +* `ghw.Disk.BusPath` (Linux, Darwin only) is the filepath to the bus used by + the disk. +* `ghw.Disk.NUMANodeID` (Linux only) is the numeric index of the NUMA node this + disk is local to, or -1 if the host system is not a NUMA system or is not + Linux. +* `ghw.Disk.Vendor` contains a string with the name of the hardware vendor for + the disk +* `ghw.Disk.Model` contains a string with the vendor-assigned disk model name +* `ghw.Disk.SerialNumber` contains a string with the disk's serial number +* `ghw.Disk.WWN` contains a string with the disk's + [World Wide Name](https://en.wikipedia.org/wiki/World_Wide_Name) +* `ghw.Disk.Partitions` contains an array of pointers to `ghw.Partition` + structs, one for each partition on the disk + +Each `ghw.Partition` struct contains these fields: + +* `ghw.Partition.Name` contains a string with the short name of the partition, + e.g. `sda1` +* `ghw.Partition.Label` contains the label for the partition itself. On Linux + systems, this is derived from the `ID_PART_ENTRY_NAME` [udev][udev] entry for + the partition. +* `ghw.Partition.FilesystemLabel` contains the label for the filesystem housed + on the partition. On Linux systems, this is derived from the `ID_FS_NAME` + [udev][udev] entry for the partition. +* `ghw.Partition.SizeBytes` contains the amount of storage the partition + provides +* `ghw.Partition.MountPoint` contains a string with the partition's mount + point, or `""` if no mount point was discovered +* `ghw.Partition.Type` contains a string indicated the filesystem type for the + partition, or `""` if the system could not determine the type +* `ghw.Partition.IsReadOnly` is a bool indicating the partition is read-only +* `ghw.Partition.Disk` is a pointer to the `ghw.Disk` object associated with + the partition. +* `ghw.Partition.UUID` is a string containing the partition UUID on Linux, the + partition UUID on MacOS and nothing on Windows. On Linux systems, this is + derived from the `ID_PART_ENTRY_UUID` [udev][udev] entry for the partition. + +[udev]: https://en.wikipedia.org/wiki/Udev + +```go +package main + +import ( + "fmt" + + "github.com/jaypipes/ghw" +) + +func main() { + block, err := ghw.Block() + if err != nil { + fmt.Printf("Error getting block storage info: %v", err) + } + + fmt.Printf("%v\n", block) + + for _, disk := range block.Disks { + fmt.Printf(" %v\n", disk) + for _, part := range disk.Partitions { + fmt.Printf(" %v\n", part) + } + } +} +``` + +Example output from my personal workstation: + +``` +block storage (1 disk, 2TB physical storage) + sda HDD (2TB) SCSI [@pci-0000:04:00.0-scsi-0:1:0:0 (node #0)] vendor=LSI model=Logical_Volume serial=600508e000000000f8253aac9a1abd0c WWN=0x600508e000000000f8253aac9a1abd0c + /dev/sda1 (100MB) + /dev/sda2 (187GB) + /dev/sda3 (449MB) + /dev/sda4 (1KB) + /dev/sda5 (15GB) + /dev/sda6 (2TB) [ext4] mounted@/ +``` + +> **NOTE**: `ghw` looks in the udev runtime database for some information. If +> you are using `ghw` in a container, remember to bind mount `/dev/disk` and +> `/run` into your container, otherwise `ghw` won't be able to query the udev +> DB or sysfs paths for information. + +### Topology + +> **NOTE**: Topology support is currently Linux-only. Windows support is +> [planned](https://github.com/jaypipes/ghw/issues/166). 
+
+The `ghw.Topology()` function returns a `ghw.TopologyInfo` struct that contains
+information about the host computer's architecture (NUMA vs. SMP), the host's
+NUMA node layout and processor-specific memory caches.
+
+The `ghw.TopologyInfo` struct contains two fields:
+
+* `ghw.TopologyInfo.Architecture` contains an enum with the value `ghw.NUMA` or
+  `ghw.SMP` depending on what the topology of the system is
+* `ghw.TopologyInfo.Nodes` is an array of pointers to `ghw.TopologyNode`
+  structs, one for each topology node (typically physical processor package)
+  found by the system
+
+Each `ghw.TopologyNode` struct contains the following fields:
+
+* `ghw.TopologyNode.ID` is the system's `uint32` identifier for the node
+* `ghw.TopologyNode.Memory` is a `ghw.MemoryArea` struct describing the memory
+  attached to this node.
+* `ghw.TopologyNode.Cores` is an array of pointers to `ghw.ProcessorCore`
+  structs that are contained in this node
+* `ghw.TopologyNode.Caches` is an array of pointers to `ghw.MemoryCache`
+  structs that represent the low-level caches associated with processors and
+  cores on the system
+* `ghw.TopologyNode.Distance` is an array of distances between NUMA nodes as
+  reported by the system.
+
+`ghw.MemoryArea` describes a collection of *physical* RAM on the host.
+
+In the simplest and most common case, all system memory fits in a single memory
+area. In more complex host systems, like [NUMA systems][numa], many memory
+areas may be present in the host system (e.g. one for each NUMA cell).
+
+[numa]: https://en.wikipedia.org/wiki/Non-uniform_memory_access
+
+The `ghw.MemoryArea` struct contains the following fields:
+
+* `ghw.MemoryArea.TotalPhysicalBytes` contains the amount of physical memory
+  associated with this memory area.
+* `ghw.MemoryArea.TotalUsableBytes` contains the amount of memory of this
+  memory area the system can actually use. Usable memory accounts for things
+  like the kernel's resident memory size and some reserved system bits. Please
+  note this value is **NOT** the amount of memory currently in use by processes
+  in the system. See [the discussion](#physical-versus-usable-memory) about
+  the difference.
+
+See above in the [CPU](#cpu) section for information about the
+`ghw.ProcessorCore` struct and how to use and query it.
+
+Each `ghw.MemoryCache` struct contains the following fields:
+
+* `ghw.MemoryCache.Type` is an enum that contains one of `ghw.DATA`,
+  `ghw.INSTRUCTION` or `ghw.UNIFIED` depending on whether the cache stores CPU
+  instructions, program data, or both
+* `ghw.MemoryCache.Level` is a positive integer indicating how close the cache
+  is to the processor.
The lower the number, the closer the cache is to the
+  processor and the faster the processor can access its contents
+* `ghw.MemoryCache.SizeBytes` is an integer containing the number of bytes the
+  cache can contain
+* `ghw.MemoryCache.LogicalProcessors` is an array of integers representing the
+  logical processors that use the cache
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    topology, err := ghw.Topology()
+    if err != nil {
+        fmt.Printf("Error getting topology info: %v", err)
+    }
+
+    fmt.Printf("%v\n", topology)
+
+    for _, node := range topology.Nodes {
+        fmt.Printf(" %v\n", node)
+        for _, cache := range node.Caches {
+            fmt.Printf("  %v\n", cache)
+        }
+    }
+}
+```
+
+Example output from my personal workstation:
+
+```
+topology SMP (1 nodes)
+ node #0 (6 cores)
+  L1i cache (32 KB) shared with logical processors: 3,9
+  L1i cache (32 KB) shared with logical processors: 2,8
+  L1i cache (32 KB) shared with logical processors: 11,5
+  L1i cache (32 KB) shared with logical processors: 10,4
+  L1i cache (32 KB) shared with logical processors: 0,6
+  L1i cache (32 KB) shared with logical processors: 1,7
+  L1d cache (32 KB) shared with logical processors: 11,5
+  L1d cache (32 KB) shared with logical processors: 10,4
+  L1d cache (32 KB) shared with logical processors: 3,9
+  L1d cache (32 KB) shared with logical processors: 1,7
+  L1d cache (32 KB) shared with logical processors: 0,6
+  L1d cache (32 KB) shared with logical processors: 2,8
+  L2 cache (256 KB) shared with logical processors: 2,8
+  L2 cache (256 KB) shared with logical processors: 3,9
+  L2 cache (256 KB) shared with logical processors: 0,6
+  L2 cache (256 KB) shared with logical processors: 10,4
+  L2 cache (256 KB) shared with logical processors: 1,7
+  L2 cache (256 KB) shared with logical processors: 11,5
+  L3 cache (12288 KB) shared with logical processors: 0,1,10,11,2,3,4,5,6,7,8,9
+```
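+
+Each topology node also exposes the RAM attached to it via the
+`ghw.TopologyNode.Memory` field described above. The following is a minimal
+sketch that prints per-node memory figures; on an SMP host like the
+workstation above, expect a single node whose totals match `ghw.Memory()`:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    topology, err := ghw.Topology()
+    if err != nil {
+        fmt.Printf("Error getting topology info: %v", err)
+    }
+
+    for _, node := range topology.Nodes {
+        // Memory is the ghw.MemoryArea describing RAM local to this node
+        fmt.Printf("node #%d: %d physical bytes, %d usable bytes\n",
+            node.ID, node.Memory.TotalPhysicalBytes, node.Memory.TotalUsableBytes)
+    }
+}
+```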
+
+### Network
+
+The `ghw.Network()` function returns a `ghw.NetworkInfo` struct that contains
+information about the host computer's networking hardware.
+
+The `ghw.NetworkInfo` struct contains one field:
+
+* `ghw.NetworkInfo.NICs` is an array of pointers to `ghw.NIC` structs, one
+  for each network interface controller found for the system
+
+Each `ghw.NIC` struct contains the following fields:
+
+* `ghw.NIC.Name` is the system's identifier for the NIC
+* `ghw.NIC.MACAddress` is the Media Access Control (MAC) address for the NIC,
+  if any
+* `ghw.NIC.IsVirtual` is a boolean indicating if the NIC is a virtualized
+  device
+* `ghw.NIC.Capabilities` (Linux only) is an array of pointers to
+  `ghw.NICCapability` structs that can describe the things the NIC supports.
+  These capabilities match the returned values from the `ethtool -k <device>`
+  call on Linux as well as the AutoNegotiation and PauseFrameUse capabilities
+  from `ethtool`.
+* `ghw.NIC.PCIAddress` (Linux only) is the PCI device address of the device
+  backing the NIC. This is non-nil only if the backing device is indeed a PCI
+  device; support for more backing devices (e.g. USB) will be added in future
+  versions.
+* `ghw.NIC.Speed` (Linux only) is a string showing the current link speed. On
+  Linux, this field will be present even if `ethtool` is not available.
+* `ghw.NIC.Duplex` (Linux only) is a string showing the current link duplex. On
+  Linux, this field will be present even if `ethtool` is not available.
+* `ghw.NIC.SupportedLinkModes` (Linux only) is a string slice containing a list
+  of supported link modes, e.g. "10baseT/Half", "1000baseT/Full".
+* `ghw.NIC.SupportedPorts` (Linux only) is a string slice containing the list
+  of supported port types, e.g. "MII", "TP", "FIBRE", "Twisted Pair".
+* `ghw.NIC.SupportedFECModes` (Linux only) is a string slice containing a list
+  of supported Forward Error Correction (FEC) modes.
+* `ghw.NIC.AdvertisedLinkModes` (Linux only) is a string slice containing the
+  link modes being advertised during auto negotiation.
+* `ghw.NIC.AdvertisedFECModes` (Linux only) is a string slice containing the
+  Forward Error Correction (FEC) modes advertised during auto negotiation.
+
+The `ghw.NICCapability` struct contains the following fields:
+
+* `ghw.NICCapability.Name` is the string name of the capability (e.g.
+  "tcp-segmentation-offload")
+* `ghw.NICCapability.IsEnabled` is a boolean indicating whether the capability
+  is currently enabled/active on the NIC
+* `ghw.NICCapability.CanEnable` is a boolean indicating whether the capability
+  may be enabled
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    net, err := ghw.Network()
+    if err != nil {
+        fmt.Printf("Error getting network info: %v", err)
+    }
+
+    fmt.Printf("%v\n", net)
+
+    for _, nic := range net.NICs {
+        fmt.Printf(" %v\n", nic)
+
+        enabledCaps := make([]int, 0)
+        for x, cap := range nic.Capabilities {
+            if cap.IsEnabled {
+                enabledCaps = append(enabledCaps, x)
+            }
+        }
+        if len(enabledCaps) > 0 {
+            fmt.Printf("  enabled capabilities:\n")
+            for _, x := range enabledCaps {
+                fmt.Printf("   - %s\n", nic.Capabilities[x].Name)
+            }
+        }
+    }
+}
+```
+
+Example output from my personal laptop:
+
+```
+net (3 NICs)
+ docker0
+  enabled capabilities:
+   - tx-checksumming
+   - tx-checksum-ip-generic
+   - scatter-gather
+   - tx-scatter-gather
+   - tx-scatter-gather-fraglist
+   - tcp-segmentation-offload
+   - tx-tcp-segmentation
+   - tx-tcp-ecn-segmentation
+   - tx-tcp-mangleid-segmentation
+   - tx-tcp6-segmentation
+   - udp-fragmentation-offload
+   - generic-segmentation-offload
+   - generic-receive-offload
+   - tx-vlan-offload
+   - highdma
+   - tx-lockless
+   - netns-local
+   - tx-gso-robust
+   - tx-fcoe-segmentation
+   - tx-gre-segmentation
+   - tx-gre-csum-segmentation
+   - tx-ipxip4-segmentation
+   - tx-ipxip6-segmentation
+   - tx-udp_tnl-segmentation
+   - tx-udp_tnl-csum-segmentation
+   - tx-gso-partial
+   - tx-sctp-segmentation
+   - tx-esp-segmentation
+   - tx-vlan-stag-hw-insert
+ enp58s0f1
+  enabled capabilities:
+   - rx-checksumming
+   - generic-receive-offload
+   - rx-vlan-offload
+   - tx-vlan-offload
+   - highdma
+   - auto-negotiation
+ wlp59s0
+  enabled capabilities:
+   - scatter-gather
+   - tx-scatter-gather
+   - generic-segmentation-offload
+   - generic-receive-offload
+   - highdma
+   - netns-local
+```
+
+### PCI
+
+`ghw` contains a PCI database inspection and querying facility that allows
+developers to not only gather information about devices on a local PCI bus but
+also query for information about hardware device classes, vendor and product
+information.
+
+> **NOTE**: Parsing of the PCI-IDS file database is provided by the separate
+> [github.com/jaypipes/pcidb library](http://github.com/jaypipes/pcidb). You
+> can read that library's README for more information about the various structs
+> that are exposed on the `ghw.PCIInfo` struct.
+
+The `ghw.PCI()` function returns a `ghw.PCIInfo` struct that contains
+information about the host computer's PCI devices.
+ +The `ghw.PCIInfo` struct contains one field: + +* `ghw.PCIInfo.Devices` is a slice of pointers to `ghw.PCIDevice` structs that + describe the PCI devices on the host system + +> **NOTE**: PCI products are often referred to by their "device ID". We use the +> term "product ID" in `ghw` because it more accurately reflects what the +> identifier is for: a specific product line produced by the vendor. + +The `ghw.PCIDevice` struct has the following fields: + +* `ghw.PCIDevice.Vendor` is a pointer to a `pcidb.Vendor` struct that + describes the device's primary vendor. This will always be non-nil. +* `ghw.PCIDevice.Product` is a pointer to a `pcidb.Product` struct that + describes the device's primary product. This will always be non-nil. +* `ghw.PCIDevice.Subsystem` is a pointer to a `pcidb.Product` struct that + describes the device's secondary/sub-product. This will always be non-nil. +* `ghw.PCIDevice.Class` is a pointer to a `pcidb.Class` struct that + describes the device's class. This will always be non-nil. +* `ghw.PCIDevice.Subclass` is a pointer to a `pcidb.Subclass` struct + that describes the device's subclass. This will always be non-nil. +* `ghw.PCIDevice.ProgrammingInterface` is a pointer to a + `pcidb.ProgrammingInterface` struct that describes the device subclass' + programming interface. This will always be non-nil. +* `ghw.PCIDevice.Driver` is a string representing the device driver the + system is using to handle this device. Can be empty string if this + information is not available. If the information is not available, this does + not mean the device is not functioning, but rather that `ghw` was not able to + retrieve driver information. + +The `ghw.PCIAddress` (which is an alias for the `ghw.pci.address.Address` +struct) contains the PCI address fields. It has a `ghw.PCIAddress.String()` +method that returns the canonical Domain:Bus:Device.Function ([D]BDF) +representation of this Address. + +The `ghw.PCIAddress` struct has the following fields: + +* `ghw.PCIAddress.Domain` is a string representing the PCI domain component of + the address. +* `ghw.PCIAddress.Bus` is a string representing the PCI bus component of + the address. +* `ghw.PCIAddress.Device` is a string representing the PCI device component of + the address. +* `ghw.PCIAddress.Function` is a string representing the PCI function component of + the address. + +> **NOTE**: Older versions (pre-`v0.9.0`) erroneously referred to the `Device` +> field as the `Slot` field. As noted by [@pearsonk](https://github.com/pearsonk) +> in [#220](https://github.com/jaypipes/ghw/issues/220), this was a misnomer. + +The following code snippet shows how to list the PCI devices on the host system +and output a simple list of PCI address and vendor/product information: + +```go +package main + +import ( + "fmt" + + "github.com/jaypipes/ghw" +) + +func main() { + pci, err := ghw.PCI() + if err != nil { + fmt.Printf("Error getting PCI info: %v", err) + } + fmt.Printf("host PCI devices:\n") + fmt.Println("====================================================") + + for _, device := range pci.Devices { + vendor := device.Vendor + vendorName := vendor.Name + if len(vendor.Name) > 20 { + vendorName = string([]byte(vendorName)[0:17]) + "..." + } + product := device.Product + productName := product.Name + if len(product.Name) > 40 { + productName = string([]byte(productName)[0:37]) + "..." 
+ } + fmt.Printf("%-12s\t%-20s\t%-40s\n", device.Address, vendorName, productName) + } +} +``` + +on my local workstation the output of the above looks like the following: + +``` +host PCI devices: +==================================================== +0000:00:00.0 Intel Corporation 5520/5500/X58 I/O Hub to ESI Port +0000:00:01.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo... +0000:00:02.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo... +0000:00:03.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo... +0000:00:07.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo... +0000:00:10.0 Intel Corporation 7500/5520/5500/X58 Physical and Link ... +0000:00:10.1 Intel Corporation 7500/5520/5500/X58 Routing and Protoc... +0000:00:14.0 Intel Corporation 7500/5520/5500/X58 I/O Hub System Man... +0000:00:14.1 Intel Corporation 7500/5520/5500/X58 I/O Hub GPIO and S... +0000:00:14.2 Intel Corporation 7500/5520/5500/X58 I/O Hub Control St... +0000:00:14.3 Intel Corporation 7500/5520/5500/X58 I/O Hub Throttle R... +0000:00:19.0 Intel Corporation 82567LF-2 Gigabit Network Connection +0000:00:1a.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1a.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1a.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1a.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont... +0000:00:1b.0 Intel Corporation 82801JI (ICH10 Family) HD Audio Contr... +0000:00:1c.0 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro... +0000:00:1c.1 Intel Corporation 82801JI (ICH10 Family) PCI Express Po... +0000:00:1c.4 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro... +0000:00:1d.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1d.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1d.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr... +0000:00:1d.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont... +0000:00:1e.0 Intel Corporation 82801 PCI Bridge +0000:00:1f.0 Intel Corporation 82801JIR (ICH10R) LPC Interface Contr... +0000:00:1f.2 Intel Corporation 82801JI (ICH10 Family) SATA AHCI Cont... +0000:00:1f.3 Intel Corporation 82801JI (ICH10 Family) SMBus Controller +0000:01:00.0 NEC Corporation uPD720200 USB 3.0 Host Controller +0000:02:00.0 Marvell Technolog... 88SE9123 PCIe SATA 6.0 Gb/s controller +0000:02:00.1 Marvell Technolog... 88SE912x IDE Controller +0000:03:00.0 NVIDIA Corporation GP107 [GeForce GTX 1050 Ti] +0000:03:00.1 NVIDIA Corporation UNKNOWN +0000:04:00.0 LSI Logic / Symbi... SAS2004 PCI-Express Fusion-MPT SAS-2 ... +0000:06:00.0 Qualcomm Atheros AR5418 Wireless Network Adapter [AR50... +0000:08:03.0 LSI Corporation FW322/323 [TrueFire] 1394a Controller +0000:3f:00.0 Intel Corporation UNKNOWN +0000:3f:00.1 Intel Corporation Xeon 5600 Series QuickPath Architectu... +0000:3f:02.0 Intel Corporation Xeon 5600 Series QPI Link 0 +0000:3f:02.1 Intel Corporation Xeon 5600 Series QPI Physical 0 +0000:3f:02.2 Intel Corporation Xeon 5600 Series Mirror Port Link 0 +0000:3f:02.3 Intel Corporation Xeon 5600 Series Mirror Port Link 1 +0000:3f:03.0 Intel Corporation Xeon 5600 Series Integrated Memory Co... +0000:3f:03.1 Intel Corporation Xeon 5600 Series Integrated Memory Co... +0000:3f:03.4 Intel Corporation Xeon 5600 Series Integrated Memory Co... +0000:3f:04.0 Intel Corporation Xeon 5600 Series Integrated Memory Co... +0000:3f:04.1 Intel Corporation Xeon 5600 Series Integrated Memory Co... 
+0000:3f:04.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+```
+
+#### Finding a PCI device by PCI address
+
+In addition to the above information, the `ghw.PCIInfo` struct has the
+following method:
+
+* `ghw.PCIInfo.GetDevice(address string)`
+
+The following code snippet shows how to call the `ghw.PCIInfo.GetDevice()`
+method and use its returned `ghw.PCIDevice` struct pointer:
+
+```go
+package main
+
+import (
+    "fmt"
+    "os"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    pci, err := ghw.PCI()
+    if err != nil {
+        fmt.Printf("Error getting PCI info: %v", err)
+    }
+
+    addr := "0000:00:00.0"
+    if len(os.Args) == 2 {
+        addr = os.Args[1]
+    }
+    fmt.Printf("PCI device information for %s\n", addr)
+    fmt.Println("====================================================")
+    deviceInfo := pci.GetDevice(addr)
+    if deviceInfo == nil {
+        fmt.Printf("could not retrieve PCI device information for %s\n", addr)
+        return
+    }
+
+    vendor := deviceInfo.Vendor
+    fmt.Printf("Vendor: %s [%s]\n", vendor.Name, vendor.ID)
+    product := deviceInfo.Product
+    fmt.Printf("Product: %s [%s]\n", product.Name, product.ID)
+    subsystem := deviceInfo.Subsystem
+    subvendor := pci.Vendors[subsystem.VendorID]
+    subvendorName := "UNKNOWN"
+    if subvendor != nil {
+        subvendorName = subvendor.Name
+    }
+    fmt.Printf("Subsystem: %s [%s] (Subvendor: %s)\n", subsystem.Name, subsystem.ID, subvendorName)
+    class := deviceInfo.Class
+    fmt.Printf("Class: %s [%s]\n", class.Name, class.ID)
+    subclass := deviceInfo.Subclass
+    fmt.Printf("Subclass: %s [%s]\n", subclass.Name, subclass.ID)
+    progIface := deviceInfo.ProgrammingInterface
+    fmt.Printf("Programming Interface: %s [%s]\n", progIface.Name, progIface.ID)
+}
+```
+
+Here's a sample output from my local workstation:
+
+```
+PCI device information for 0000:03:00.0
+====================================================
+Vendor: NVIDIA Corporation [10de]
+Product: GP107 [GeForce GTX 1050 Ti] [1c82]
+Subsystem: UNKNOWN [8613] (Subvendor: ASUSTeK Computer Inc.)
+Class: Display controller [03]
+Subclass: VGA compatible controller [00]
+Programming Interface: VGA controller [00]
+```
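+
+A PCI address string can also be deconstructed into its components. Below is a
+minimal sketch using the `ghw.PCIAddressFromString` helper; the assumption here
+(not stated elsewhere in this document) is that it returns `nil` when the
+supplied string is not a valid [D]BDF address:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    // Parse a [D]BDF address string into a *ghw.PCIAddress
+    addr := ghw.PCIAddressFromString("0000:03:00.0")
+    if addr == nil {
+        fmt.Println("could not parse PCI address")
+        return
+    }
+    fmt.Printf("domain=%s bus=%s device=%s function=%s\n",
+        addr.Domain, addr.Bus, addr.Device, addr.Function)
+}
+```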
+
+### GPU
+
+The `ghw.GPU()` function returns a `ghw.GPUInfo` struct that contains
+information about the host computer's graphics hardware.
+
+The `ghw.GPUInfo` struct contains one field:
+
+* `ghw.GPUInfo.GraphicCards` is an array of pointers to `ghw.GraphicsCard`
+  structs, one for each graphics card found for the system
+
+Each `ghw.GraphicsCard` struct contains the following fields:
+
+* `ghw.GraphicsCard.Index` is the system's numeric zero-based index for the
+  card on the bus
+* `ghw.GraphicsCard.Address` is the PCI address for the graphics card
+* `ghw.GraphicsCard.DeviceInfo` is a pointer to a `ghw.PCIDevice` struct
+  describing the graphics card. This may be `nil` if no PCI device information
+  could be determined for the card.
+* `ghw.GraphicsCard.Node` is a pointer to a `ghw.TopologyNode` struct that the
+  GPU/graphics card is affined to. On non-NUMA systems, this will always be
+  `nil`.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    gpu, err := ghw.GPU()
+    if err != nil {
+        fmt.Printf("Error getting GPU info: %v", err)
+    }
+
+    fmt.Printf("%v\n", gpu)
+
+    for _, card := range gpu.GraphicsCards {
+        fmt.Printf(" %v\n", card)
+    }
+}
+```
+
+Example output from my personal workstation:
+
+```
+gpu (1 graphics card)
+ card #0 @0000:03:00.0 -> class: 'Display controller' vendor: 'NVIDIA Corporation' product: 'GP107 [GeForce GTX 1050 Ti]'
+```
+
+**NOTE**: You can [read more](#pci) about the fields of the `ghw.PCIDevice`
+struct if you'd like to dig deeper into PCI subsystem and programming interface
+information.
+
+**NOTE**: You can [read more](#topology) about the fields of the
+`ghw.TopologyNode` struct if you'd like to dig deeper into the NUMA/topology
+subsystem.
+
+### Accelerator
+
+The `ghw.Accelerator()` function returns a `ghw.AcceleratorInfo` struct that
+contains information about the host computer's processing accelerator
+hardware, a category that covers hardware commonly used for AI workloads. The
+hardware detected in this category comprises processing accelerators (PCI
+class `1200`), 3D controllers (`0302`) and display controllers (`0380`).
+
+The `ghw.AcceleratorInfo` struct contains one field:
+
+* `ghw.AcceleratorInfo.Devices` is an array of pointers to
+  `ghw.AcceleratorDevice` structs, one for each processing accelerator card
+  found for the system.
+
+Each `ghw.AcceleratorDevice` struct contains the following fields:
+
+* `ghw.AcceleratorDevice.Address` is the PCI address for the processing
+  accelerator card.
+* `ghw.AcceleratorDevice.PCIDevice` is a pointer to a `ghw.PCIDevice` struct
+  describing the processing accelerator card. This may be `nil` if no PCI
+  device information could be determined for the card.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    accel, err := ghw.Accelerator()
+    if err != nil {
+        fmt.Printf("Error getting processing accelerator info: %v", err)
+    }
+
+    fmt.Printf("%v\n", accel)
+
+    for _, device := range accel.Devices {
+        fmt.Printf(" %v\n", device)
+    }
+}
+```
+
+Example output from a testing machine:
+
+```
+processing accelerators (1 device)
+ device @0000:00:04.0 -> driver: 'fake_pci_driver' class: 'Processing accelerators' vendor: 'Red Hat, Inc.' product: 'QEMU PCI Test Device'
+```
+
+**NOTE**: You can [read more](#pci) about the fields of the `ghw.PCIDevice`
+struct if you'd like to dig deeper into PCI subsystem and programming interface
+information.
+
+### Chassis
+
+The `ghw.Chassis()` function returns a `ghw.ChassisInfo` struct that contains
+information about the host computer's hardware chassis.
+
+The `ghw.ChassisInfo` struct contains multiple fields:
+
+* `ghw.ChassisInfo.AssetTag` is a string with the chassis asset tag
+* `ghw.ChassisInfo.SerialNumber` is a string with the chassis serial number
+* `ghw.ChassisInfo.Type` is a string with the chassis type *code*
+* `ghw.ChassisInfo.TypeDescription` is a string with a description of the
+  chassis type
+* `ghw.ChassisInfo.Vendor` is a string with the chassis vendor
+* `ghw.ChassisInfo.Version` is a string with the chassis version
+
+> **NOTE**: These fields are often missing for non-server hardware. Don't be
+> surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    chassis, err := ghw.Chassis()
+    if err != nil {
+        fmt.Printf("Error getting chassis info: %v", err)
+    }
+
+    fmt.Printf("%v\n", chassis)
+}
+```
+
+Example output from my personal workstation:
+
+```
+chassis type=Desktop vendor=System76 version=thelio-r1
+```
+
+> **NOTE**: Some of the values such as serial numbers are shown as unknown
+> because the Linux kernel by default disallows access to those fields if
+> you're not running as root. They will be populated if `ghw` runs as root;
+> otherwise you may see warnings like the following:
+
+```
+WARNING: Unable to read chassis_serial: open /sys/class/dmi/id/chassis_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### BIOS
+
+The `ghw.BIOS()` function returns a `ghw.BIOSInfo` struct that contains
+information about the host computer's basic input/output system (BIOS).
+
+The `ghw.BIOSInfo` struct contains multiple fields:
+
+* `ghw.BIOSInfo.Vendor` is a string with the BIOS vendor
+* `ghw.BIOSInfo.Version` is a string with the BIOS version
+* `ghw.BIOSInfo.Date` is a string with the date the BIOS was flashed/created
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    bios, err := ghw.BIOS()
+    if err != nil {
+        fmt.Printf("Error getting BIOS info: %v", err)
+    }
+
+    fmt.Printf("%v\n", bios)
+}
+```
+
+Example output from my personal workstation:
+
+```
+bios vendor=System76 version=F2 Z5 date=11/14/2018
+```
+
+### Baseboard
+
+The `ghw.Baseboard()` function returns a `ghw.BaseboardInfo` struct that
+contains information about the host computer's hardware baseboard.
+
+The `ghw.BaseboardInfo` struct contains multiple fields:
+
+* `ghw.BaseboardInfo.AssetTag` is a string with the baseboard asset tag
+* `ghw.BaseboardInfo.SerialNumber` is a string with the baseboard serial number
+* `ghw.BaseboardInfo.Vendor` is a string with the baseboard vendor
+* `ghw.BaseboardInfo.Product` is a string with the baseboard name on Linux and
+  Product on Windows
+* `ghw.BaseboardInfo.Version` is a string with the baseboard version
+
+> **NOTE**: These fields are often missing for non-server hardware. Don't be
+> surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    baseboard, err := ghw.Baseboard()
+    if err != nil {
+        fmt.Printf("Error getting baseboard info: %v", err)
+    }
+
+    fmt.Printf("%v\n", baseboard)
+}
+```
+
+Example output from my personal workstation:
+
+```
+baseboard vendor=System76 version=thelio-r1
+```
+
+> **NOTE**: Some of the values such as serial numbers are shown as unknown
+> because the Linux kernel by default disallows access to those fields if
+> you're not running as root. They will be populated if `ghw` runs as root;
+> otherwise you may see warnings like the following:
+
+```
+WARNING: Unable to read board_serial: open /sys/class/dmi/id/board_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### Product
+
+The `ghw.Product()` function returns a `ghw.ProductInfo` struct that
+contains information about the host computer's hardware product line.
+
+The `ghw.ProductInfo` struct contains multiple fields:
+
+* `ghw.ProductInfo.Family` is a string describing the product family
+* `ghw.ProductInfo.Name` is a string with the product name
+* `ghw.ProductInfo.SerialNumber` is a string with the product serial number
+* `ghw.ProductInfo.UUID` is a string with the product UUID
+* `ghw.ProductInfo.SKU` is a string with the product stock keeping unit (SKU)
+  identifier
+* `ghw.ProductInfo.Vendor` is a string with the product vendor
+* `ghw.ProductInfo.Version` is a string with the product version
+
+> **NOTE**: These fields are often missing for non-server hardware. Don't be
+> surprised to see empty string, "Default string" or "None" values.
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    product, err := ghw.Product()
+    if err != nil {
+        fmt.Printf("Error getting product info: %v", err)
+    }
+
+    fmt.Printf("%v\n", product)
+}
+```
+
+Example output from my personal workstation:
+
+```
+product family=Default string name=Thelio vendor=System76 sku=Default string version=thelio-r1
+```
+
+> **NOTE**: Some of the values such as serial numbers are shown as unknown
+> because the Linux kernel by default disallows access to those fields if
+> you're not running as root. They will be populated if `ghw` runs as root;
+> otherwise you may see warnings like the following:
+
+```
+WARNING: Unable to read product_serial: open /sys/class/dmi/id/product_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+## Advanced Usage
+
+### Disabling warning messages
+
+When `ghw` isn't able to retrieve some information, it may print certain
+warning messages to `stderr`. To disable these warnings, simply set the
+`GHW_DISABLE_WARNINGS` environment variable:
+
+```
+$ ghwc memory
+WARNING:
+Could not determine total physical bytes of memory. This may
+be due to the host being a virtual machine or container with no
+/var/log/syslog file, or the current user may not have necessary
+privileges to read the syslog. We are falling back to setting the
+total physical amount of memory to the total usable amount of memory
+memory (24GB physical, 24GB usable)
+```
+
+```
+$ GHW_DISABLE_WARNINGS=1 ghwc memory
+memory (24GB physical, 24GB usable)
+```
+
+You can disable warnings programmatically using the `WithDisableWarnings`
+option:
+
+```go
+
+import (
+    "github.com/jaypipes/ghw"
+)
+
+mem, err := ghw.Memory(ghw.WithDisableWarnings())
+```
+
+`WithDisableWarnings` is an alias for the `WithNullAlerter` option, which in
+turn leverages the more general `Alerter` feature of `ghw`.
+
+You may supply an `Alerter` to `ghw` to redirect all warnings to it, for
+example a logger object (see e.g. golang's stdlib `log.Logger`).
+`Alerter` is in fact the minimal logging interface `ghw` needs.
+To learn more, please check the `option.Alerter` interface and the
+`ghw.WithAlerter()` function.
+
+### Overriding the root mountpoint `ghw` uses
+
+When `ghw` looks for information about the host system, it considers `/` as its
+root mountpoint. So, for example, when looking up CPU information on a Linux
+system, `ghw.CPU()` will use the path `/proc/cpuinfo`.
+
+If you are calling `ghw` from a system that has an alternate root mountpoint,
+you can either set the `GHW_CHROOT` environment variable to that alternate
+path, or call one of the functions like `ghw.CPU()` or `ghw.Memory()` with the
+`ghw.WithChroot()` modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`GHW_CHROOT` to `/host` so that `ghw` can find `/proc/cpuinfo` at
+`/host/proc/cpuinfo`.
+
+Alternately, you can use the `ghw.WithChroot()` function like so:
+
+```go
+cpu, err := ghw.CPU(ghw.WithChroot("/host"))
+```
+
+### Serialization to JSON or YAML
+
+All of the `ghw` `XXXInfo` structs -- e.g. `ghw.CPUInfo` -- have two methods
+for producing a serialized JSON or YAML string representation of the contained
+information:
+
+* `JSONString()` returns a string containing the information serialized into
+  JSON. It accepts a single boolean parameter indicating whether to use
+  indentation when outputting the string
+* `YAMLString()` returns a string containing the information serialized into
+  YAML
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    mem, err := ghw.Memory()
+    if err != nil {
+        fmt.Printf("Error getting memory info: %v", err)
+    }
+
+    fmt.Printf("%s", mem.YAMLString())
+}
+```
+
+the above example code prints the following out on my local workstation:
+
+```
+memory:
+  supported_page_sizes:
+  - 1073741824
+  - 2097152
+  total_physical_bytes: 25263415296
+  total_usable_bytes: 25263415296
+```
+
+### Overriding a specific mountpoint (Linux only)
+
+When running inside containers, it can be cumbersome to only override the root
+mountpoint. Inside containers, when granting access to the host file systems,
+it is common to bind-mount them to a non-standard location, like `/sys` on
+`/host-sys` or `/proc` to `/host-proc`. It is rarer to mount them to a common
+subtree (e.g. `/sys` to `/host/sys` and `/proc` to `/host/proc`...).
+
+To better cover this use case, `ghw.WithPathOverrides()` can be used to supply
+a mapping of directories to mountpoints, as this example shows:
+
+```go
+cpu, err := ghw.CPU(ghw.WithPathOverrides(ghw.PathOverrides{
+    "/proc": "/host-proc",
+    "/sys": "/host-sys",
+}))
+```
+
+**NOTE**: This feature works in addition to, and is composable with, the
+`ghw.WithChroot()` function and `GHW_CHROOT` environment variable.
+
+### Reading hardware information from a `ghw` snapshot (Linux only)
+
+The `ghw-snapshot` tool can create a snapshot of a host's hardware information.
+
+Please read [`SNAPSHOT.md`](SNAPSHOT.md) to learn about creating snapshots with
+the `ghw-snapshot` tool.
+
+You can make `ghw` read hardware information from a snapshot created with
+`ghw-snapshot` using environment variables or programmatically.
+
+Use the `GHW_SNAPSHOT_PATH` environment variable to specify the filepath to a
+snapshot that `ghw` will read to determine hardware information. All the needed
+chroot changes will be automatically performed. By default, the snapshot is
+unpacked into a temporary directory managed by `ghw`. This temporary directory
+is automatically deleted when `ghw` is finished reading the snapshot.
+
+Three other environment variables are relevant if and only if
+`GHW_SNAPSHOT_PATH` is not empty:
+
+* `GHW_SNAPSHOT_ROOT` lets users specify the directory on which the snapshot
+  should be unpacked. This moves the ownership of that directory from `ghw` to
+  the user. For this reason, `ghw` will *not* automatically clean up the
+  content unpacked into `GHW_SNAPSHOT_ROOT`.
+* `GHW_SNAPSHOT_EXCLUSIVE` tells `ghw` that the directory is meant only to
+  contain the given snapshot, thus `ghw` will *not* attempt to unpack it unless
+  the directory is empty.
You can use both `GHW_SNAPSHOT_ROOT` and
+  `GHW_SNAPSHOT_EXCLUSIVE` to make sure `ghw` unpacks the snapshot only once
+  regardless of how many `ghw` packages (e.g. cpu, memory) access it. Set the
+  value of this environment variable to any non-empty string.
+* `GHW_SNAPSHOT_PRESERVE` tells `ghw` not to clean up the unpacked snapshot.
+  Set the value of this environment variable to any non-empty string.
+
+```go
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+    Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+}))
+
+myRoot := "/my/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+    Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+    Root: &myRoot,
+}))
+
+myOtherRoot := "/my/other/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+    Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+    Root: &myOtherRoot,
+    Exclusive: true,
+}))
+```
+
+### Creating snapshots
+
+You can create `ghw` snapshots using the `ghw-snapshot` tool or
+programmatically using the `pkg/snapshot` package.
+
+Below is an example of creating a `ghw` snapshot using the `pkg/snapshot`
+package.
+
+```go
+
+import (
+    "fmt"
+    "os"
+
+    "github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// ...
+
+scratchDir, err := os.MkdirTemp("", "ghw-snapshot-*")
+if err != nil {
+    fmt.Printf("Error creating clone directory: %v", err)
+}
+defer os.RemoveAll(scratchDir)
+
+// this step clones all the files and directories ghw cares about
+if err := snapshot.CloneTreeInto(scratchDir); err != nil {
+    fmt.Printf("error cloning into %q: %v", scratchDir, err)
+}
+
+// optionally, you may add extra content into your snapshot.
+// ghw will ignore the extra content.
+// Glob patterns like `filepath.Glob` are supported.
+fileSpecs := []string{
+    "/proc/cmdline",
+}
+
+// options allows the client code to optionally dereference symlinks, or copy
+// them into the cloned tree as symlinks
+var opts *snapshot.CopyFileOptions
+if err := snapshot.CopyFilesInto(fileSpecs, scratchDir, opts); err != nil {
+    fmt.Printf("error cloning extra files into %q: %v", scratchDir, err)
+}
+
+// automates the creation of the gzipped tarball out of the given tree.
+output := "my-snapshot.tgz"
+if err := snapshot.PackFrom(output, scratchDir); err != nil {
+    fmt.Printf("error packing %q into %q: %v", scratchDir, output, err)
+}
+```
+
+## Calling external programs
+
+By default `ghw` may call external programs, for example `ethtool`, to learn
+about hardware capabilities. In some rare circumstances it may be useful to
+opt out from this behaviour and rely only on the data provided by
+pseudo-filesystems, like sysfs.
+
+The most common use case is when `ghw` reads from a snapshot. In this case the
+information provided by external tools would be inconsistent with the data
+from the snapshot, since the tools run on a different host than the one the
+snapshot was created on.
+
+To prevent `ghw` from calling external tools, set the `GHW_DISABLE_TOOLS`
+environment variable to any value, or, programmatically, use the
+`ghw.WithDisableTools()` function. The default behaviour of `ghw` is to call
+external tools when available.
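+
+For example, a minimal sketch that gathers block storage information using
+only pseudo-filesystem data, without shelling out to any external tool:
+
+```go
+// Collect block storage info without invoking external helper programs
+block, err := ghw.Block(ghw.WithDisableTools())
+```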
+
+> **WARNING**: on all platforms, disabling external tools makes `ghw` return
+> less data. Unless noted otherwise, there is _no fallback flow_ if external
+> tools are disabled. On MacOSX/Darwin, disabling external tools disables
+> block support entirely.
+
+## Developers
+
+[Contributions](CONTRIBUTING.md) to `ghw` are welcomed! Fork the repo on GitHub
+and submit a pull request with your proposed changes. Or, feel free to log an
+issue for a feature request or bug report.
diff --git a/vendor/github.com/jaypipes/ghw/SECURITY.md b/vendor/github.com/jaypipes/ghw/SECURITY.md
new file mode 100644
index 00000000..660f2e63
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/SECURITY.md
@@ -0,0 +1,23 @@
+# Security Policy
+
+We take security vulnerabilities seriously (and so should you!)
+
+Our policy on reported vulnerabilities (see below on how to report) is that we
+will respond to the reporter of a vulnerability within two (2) business days of
+receiving the report and notify the reporter whether and when a remediation
+will be committed.
+
+When a remediation for a security vulnerability is committed, we will cut a
+tagged release of `ghw` and include in the release notes for that tagged
+release a description of the vulnerability and a discussion of how it was
+remediated, along with a note urging users to update to that fixed version.
+
+## Reporting a Vulnerability
+
+While `ghw` does have automated Github Dependabot alerts about security
+vulnerabilities in `ghw`'s dependencies, there is always a chance that a
+vulnerability in a dependency goes undetected by Dependabot. If you are aware
+of a vulnerability either in `ghw` or one of its dependencies, please do not
+hesitate to reach out to `ghw` maintainers via email or Slack. **Do not
+discuss vulnerabilities in a public forum**.
+
+`ghw`'s primary maintainer is Jay Pipes, who can be found on the Kubernetes
+Slack community as `@jaypipes` and reached via email at jaypipes at gmail dot
+com.
diff --git a/vendor/github.com/jaypipes/ghw/SNAPSHOT.md b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
new file mode 100644
index 00000000..696a3ea6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
@@ -0,0 +1,45 @@
+# ghw snapshots
+
+For ghw, snapshots are partial clones of the `/proc`, `/sys` (et al.) subtrees
+copied from arbitrary machines, which ghw can consume later. "Partial" because
+the snapshot doesn't need to contain a complete copy of the entire filesystem
+subtree (that is doable but impractical). It only needs to contain the paths
+ghw cares about. The snapshot concept was introduced [to make ghw easier to
+test](https://github.com/jaypipes/ghw/issues/66).
+
+## Create and consume snapshot
+
+The recommended way to create snapshots for ghw is to use the `ghw-snapshot`
+tool. This tool is maintained by the ghw authors, and snapshots created with
+this tool are guaranteed to work.
+
+To consume the ghw snapshots, please check the `README.md` document.
+
+## Snapshot design and definitions
+
+The remainder of this document describes what a snapshot looks like and
+provides the rationale for all the major design decisions. Even though this
+document aims to provide all the necessary information to understand how ghw
+creates snapshots and what you should expect, we also recommend checking the
+[project issues](https://github.com/jaypipes/ghw/issues) and the `git` history
+to get the full picture.
+
+### Scope
+
+ghw supports snapshots only on Linux platforms. This restriction may be lifted
+in future releases. Snapshots must be consumable in the following supported
+ways:
+
+1. (way 1) from docker (or podman), mounting them as volumes. See
+   `hack/run-against-snapshot.sh`
+2. (way 2) using the environment variables `GHW_SNAPSHOT_*`. See `README.md`
+   for the full documentation.
+
+Other combinations are possible, but are unsupported and may stop working any
+time.
+
+You should depend only on the supported ways to consume snapshots.
+
+### Snapshot content constraints
+
+Stemming from the use cases, the snapshot content must have the following
+properties:
+
+0. (constraint 0) MUST contain the same information as a live system
+   (obviously). Whatever you learn from a live system, you MUST be able to
+   learn from a snapshot.
+1. (constraint 1) MUST NOT require any post processing before it is consumable
+   besides, obviously, unpacking the `.tar.gz` into the right directory - and
+   pointing ghw to that directory.
+2. (constraint 2) MUST NOT require any special handling nor special code path
+   in ghw. From the ghw perspective, running against a live system or against
+   a snapshot should be completely transparent.
+3. (constraint 3) MUST contain only data - no executable code is allowed ever.
+   This makes snapshots trivially safe to share and consume.
+4. (constraint 4) MUST NOT contain any personally-identifiable data. Data
+   gathered into a snapshot is for testing and troubleshooting purposes and
+   should be safe to send to troubleshooters to analyze.
+
+It must be noted that trivially cloning subtrees from `/proc` and `/sys` and
+creating a tarball out of them doesn't work because both pseudo filesystems
+make use of symlinks, and [docker doesn't really play nice with symlinks](https://github.com/jaypipes/ghw/commit/f8ffd4d24e62eb9017511f072ccf51f13d4a3399).
+This conflicts with (way 1) above.
+
diff --git a/vendor/github.com/jaypipes/ghw/alias.go b/vendor/github.com/jaypipes/ghw/alias.go
new file mode 100644
index 00000000..9c403d96
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/alias.go
@@ -0,0 +1,193 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+// + +package ghw + +import ( + "github.com/jaypipes/ghw/pkg/accelerator" + "github.com/jaypipes/ghw/pkg/baseboard" + "github.com/jaypipes/ghw/pkg/bios" + "github.com/jaypipes/ghw/pkg/block" + "github.com/jaypipes/ghw/pkg/chassis" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/gpu" + "github.com/jaypipes/ghw/pkg/memory" + "github.com/jaypipes/ghw/pkg/net" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/pci" + pciaddress "github.com/jaypipes/ghw/pkg/pci/address" + "github.com/jaypipes/ghw/pkg/product" + "github.com/jaypipes/ghw/pkg/topology" +) + +type WithOption = option.Option + +var ( + WithChroot = option.WithChroot + WithSnapshot = option.WithSnapshot + WithAlerter = option.WithAlerter + WithNullAlerter = option.WithNullAlerter + // match the existing environ variable to minimize surprises + WithDisableWarnings = option.WithNullAlerter + WithDisableTools = option.WithDisableTools + WithPathOverrides = option.WithPathOverrides +) + +type SnapshotOptions = option.SnapshotOptions + +type PathOverrides = option.PathOverrides + +type CPUInfo = cpu.Info + +var ( + CPU = cpu.New +) + +type MemoryArea = memory.Area +type MemoryInfo = memory.Info +type MemoryCache = memory.Cache +type MemoryCacheType = memory.CacheType +type MemoryModule = memory.Module + +const ( + MemoryCacheTypeUnified = memory.CacheTypeUnified + // DEPRECATED: Please use MemoryCacheTypeUnified + MEMORY_CACHE_TYPE_UNIFIED = memory.CACHE_TYPE_UNIFIED + MemoryCacheTypeInstruction = memory.CacheTypeInstruction + // DEPRECATED: Please use MemoryCacheTypeInstruction + MEMORY_CACHE_TYPE_INSTRUCTION = memory.CACHE_TYPE_INSTRUCTION + MemoryCacheTypeData = memory.CacheTypeData + // DEPRECATED: Please use MemoryCacheTypeData + MEMORY_CACHE_TYPE_DATA = memory.CACHE_TYPE_DATA +) + +var ( + Memory = memory.New +) + +type BlockInfo = block.Info +type Disk = block.Disk +type Partition = block.Partition + +var ( + Block = block.New +) + +type DriveType = block.DriveType + +const ( + DriveTypeUnknown = block.DriveTypeUnknown + // DEPRECATED: Please use DriveTypeUnknown + DRIVE_TYPE_UNKNOWN = block.DRIVE_TYPE_UNKNOWN + DriveTypeHDD = block.DriveTypeHDD + // DEPRECATED: Please use DriveTypeHDD + DRIVE_TYPE_HDD = block.DRIVE_TYPE_HDD + DriveTypeFDD = block.DriveTypeFDD + // DEPRECATED: Please use DriveTypeFDD + DRIVE_TYPE_FDD = block.DRIVE_TYPE_FDD + DriveTypeODD = block.DriveTypeODD + // DEPRECATED: Please use DriveTypeODD + DRIVE_TYPE_ODD = block.DRIVE_TYPE_ODD + DriveTypeSSD = block.DriveTypeSSD + // DEPRECATED: Please use DriveTypeSSD + DRIVE_TYPE_SSD = block.DRIVE_TYPE_SSD +) + +type StorageController = block.StorageController + +const ( + StorageControllerUnknown = block.StorageControllerUnknown + // DEPRECATED: Please use StorageControllerUnknown + STORAGE_CONTROLLER_UNKNOWN = block.STORAGE_CONTROLLER_UNKNOWN + StorageControllerIDE = block.StorageControllerIDE + // DEPRECATED: Please use StorageControllerIDE + STORAGE_CONTROLLER_IDE = block.STORAGE_CONTROLLER_IDE + StorageControllerSCSI = block.StorageControllerSCSI + // DEPRECATED: Please use StorageControllerSCSI + STORAGE_CONTROLLER_SCSI = block.STORAGE_CONTROLLER_SCSI + StorageControllerNVMe = block.StorageControllerNVMe + // DEPRECATED: Please use StorageControllerNVMe + STORAGE_CONTROLLER_NVME = block.STORAGE_CONTROLLER_NVME + StorageControllerVirtIO = block.StorageControllerVirtIO + // DEPRECATED: Please use StorageControllerVirtIO + STORAGE_CONTROLLER_VIRTIO = block.STORAGE_CONTROLLER_VIRTIO + StorageControllerMMC = 
block.StorageControllerMMC + // DEPRECATED: Please use StorageControllerMMC + STORAGE_CONTROLLER_MMC = block.STORAGE_CONTROLLER_MMC +) + +type NetworkInfo = net.Info +type NIC = net.NIC +type NICCapability = net.NICCapability + +var ( + Network = net.New +) + +type BIOSInfo = bios.Info + +var ( + BIOS = bios.New +) + +type ChassisInfo = chassis.Info + +var ( + Chassis = chassis.New +) + +type BaseboardInfo = baseboard.Info + +var ( + Baseboard = baseboard.New +) + +type TopologyInfo = topology.Info +type TopologyNode = topology.Node + +var ( + Topology = topology.New +) + +type Architecture = topology.Architecture + +const ( + ArchitectureSMP = topology.ArchitectureSMP + // DEPRECATED: Please use ArchitectureSMP + ARCHITECTURE_SMP = topology.ArchitectureSMP + ArchitectureNUMA = topology.ArchitectureNUMA + // DEPRECATED: Please use ArchitectureNUMA + ARCHITECTURE_NUMA = topology.ArchitectureNUMA +) + +type PCIInfo = pci.Info +type PCIAddress = pciaddress.Address +type PCIDevice = pci.Device + +var ( + PCI = pci.New + PCIAddressFromString = pciaddress.FromString +) + +type ProductInfo = product.Info + +var ( + Product = product.New +) + +type GPUInfo = gpu.Info +type GraphicsCard = gpu.GraphicsCard + +var ( + GPU = gpu.New +) + +type AcceleratorInfo = accelerator.Info +type AcceleratorDevice = accelerator.AcceleratorDevice + +var ( + Accelerator = accelerator.New +) diff --git a/vendor/github.com/jaypipes/ghw/doc.go b/vendor/github.com/jaypipes/ghw/doc.go new file mode 100644 index 00000000..826ed7b3 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/doc.go @@ -0,0 +1,14 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +/* +package ghw discovers hardware-related information about the host computer, +including CPU, memory, block storage, NUMA topology, network devices, PCI, GPU, +and baseboard/BIOS/chassis/product information. + +Please see the extensive README.md document for examples of usage. +*/ +package ghw diff --git a/vendor/github.com/jaypipes/ghw/host.go b/vendor/github.com/jaypipes/ghw/host.go new file mode 100644 index 00000000..89b1ad27 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/host.go @@ -0,0 +1,147 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+// + +package ghw + +import ( + "fmt" + + "github.com/jaypipes/ghw/pkg/context" + + "github.com/jaypipes/ghw/pkg/accelerator" + "github.com/jaypipes/ghw/pkg/baseboard" + "github.com/jaypipes/ghw/pkg/bios" + "github.com/jaypipes/ghw/pkg/block" + "github.com/jaypipes/ghw/pkg/chassis" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/gpu" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/memory" + "github.com/jaypipes/ghw/pkg/net" + "github.com/jaypipes/ghw/pkg/pci" + "github.com/jaypipes/ghw/pkg/product" + "github.com/jaypipes/ghw/pkg/topology" +) + +// HostInfo is a wrapper struct containing information about the host system's +// memory, block storage, CPU, etc +type HostInfo struct { + ctx *context.Context + Memory *memory.Info `json:"memory"` + Block *block.Info `json:"block"` + CPU *cpu.Info `json:"cpu"` + Topology *topology.Info `json:"topology"` + Network *net.Info `json:"network"` + GPU *gpu.Info `json:"gpu"` + Accelerator *accelerator.Info `json:"accelerator"` + Chassis *chassis.Info `json:"chassis"` + BIOS *bios.Info `json:"bios"` + Baseboard *baseboard.Info `json:"baseboard"` + Product *product.Info `json:"product"` + PCI *pci.Info `json:"pci"` +} + +// Host returns a pointer to a HostInfo struct that contains fields with +// information about the host system's CPU, memory, network devices, etc +func Host(opts ...*WithOption) (*HostInfo, error) { + ctx := context.New(opts...) + + memInfo, err := memory.New(opts...) + if err != nil { + return nil, err + } + blockInfo, err := block.New(opts...) + if err != nil { + return nil, err + } + cpuInfo, err := cpu.New(opts...) + if err != nil { + return nil, err + } + topologyInfo, err := topology.New(opts...) + if err != nil { + return nil, err + } + netInfo, err := net.New(opts...) + if err != nil { + return nil, err + } + gpuInfo, err := gpu.New(opts...) + if err != nil { + return nil, err + } + acceleratorInfo, err := accelerator.New(opts...) + if err != nil { + return nil, err + } + chassisInfo, err := chassis.New(opts...) + if err != nil { + return nil, err + } + biosInfo, err := bios.New(opts...) + if err != nil { + return nil, err + } + baseboardInfo, err := baseboard.New(opts...) + if err != nil { + return nil, err + } + productInfo, err := product.New(opts...) + if err != nil { + return nil, err + } + pciInfo, err := pci.New(opts...) 
+ if err != nil { + return nil, err + } + return &HostInfo{ + ctx: ctx, + CPU: cpuInfo, + Memory: memInfo, + Block: blockInfo, + Topology: topologyInfo, + Network: netInfo, + GPU: gpuInfo, + Accelerator: acceleratorInfo, + Chassis: chassisInfo, + BIOS: biosInfo, + Baseboard: baseboardInfo, + Product: productInfo, + PCI: pciInfo, + }, nil +} + +// String returns a newline-separated output of the HostInfo's component +// structs' String-ified output +func (info *HostInfo) String() string { + return fmt.Sprintf( + "%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n", + info.Block.String(), + info.CPU.String(), + info.GPU.String(), + info.Accelerator.String(), + info.Memory.String(), + info.Network.String(), + info.Topology.String(), + info.Chassis.String(), + info.BIOS.String(), + info.Baseboard.String(), + info.Product.String(), + info.PCI.String(), + ) +} + +// YAMLString returns a string with the host information formatted as YAML +// under a top-level "host:" key +func (i *HostInfo) YAMLString() string { + return marshal.SafeYAML(i.ctx, i) +} + +// JSONString returns a string with the host information formatted as JSON +// under a top-level "host:" key +func (i *HostInfo) JSONString(indent bool) string { + return marshal.SafeJSON(i.ctx, i, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator.go b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator.go new file mode 100644 index 00000000..b51ef2e2 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator.go @@ -0,0 +1,84 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package accelerator + +import ( + "fmt" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/pci" +) + +type AcceleratorDevice struct { + // the PCI address where the accelerator device can be found + Address string `json:"address"` + // pointer to a PCIDevice struct that describes the vendor and product + // model, etc + PCIDevice *pci.Device `json:"pci_device"` +} + +func (dev *AcceleratorDevice) String() string { + deviceStr := dev.Address + if dev.PCIDevice != nil { + deviceStr = dev.PCIDevice.String() + } + nodeStr := "" + return fmt.Sprintf( + "device %s@%s", + nodeStr, + deviceStr, + ) +} + +type Info struct { + ctx *context.Context + Devices []*AcceleratorDevice `json:"devices"` +} + +// New returns a pointer to an Info struct that contains information about the +// accelerator devices on the host system +func New(opts ...*option.Option) (*Info, error) { + ctx := context.New(opts...) 
+ info := &Info{ctx: ctx} + + if err := ctx.Do(info.load); err != nil { + return nil, err + } + return info, nil +} + +func (i *Info) String() string { + numDevsStr := "devices" + if len(i.Devices) == 1 { + numDevsStr = "device" + } + return fmt.Sprintf( + "processing accelerators (%d %s)", + len(i.Devices), + numDevsStr, + ) +} + +// simple private struct used to encapsulate processing accelerators information in a top-level +// "accelerator" YAML/JSON map/object key +type acceleratorPrinter struct { + Info *Info `json:"accelerator"` +} + +// YAMLString returns a string with the processing accelerators information formatted as YAML +// under a top-level "accelerator:" key +func (i *Info) YAMLString() string { + return marshal.SafeYAML(i.ctx, acceleratorPrinter{i}) +} + +// JSONString returns a string with the processing accelerators information formatted as JSON +// under a top-level "accelerator:" key +func (i *Info) JSONString(indent bool) string { + return marshal.SafeJSON(i.ctx, acceleratorPrinter{i}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_linux.go b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_linux.go new file mode 100644 index 00000000..39d7eecb --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_linux.go @@ -0,0 +1,80 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package accelerator + +import ( + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/pci" +) + +// PCI IDs list available at https://admin.pci-ids.ucw.cz/read/PD +const ( + pciClassProcessingAccelerator = "12" + pciSubclassProcessingAccelerator = "00" + pciClassController = "03" + pciSubclass3DController = "02" + pciSubclassDisplayController = "80" +) + +var ( + acceleratorPCIClasses = map[string][]string{ + pciClassProcessingAccelerator: []string{ + pciSubclassProcessingAccelerator, + }, + pciClassController: []string{ + pciSubclass3DController, + pciSubclassDisplayController, + }, + } +) + +func (i *Info) load() error { + accelDevices := make([]*AcceleratorDevice, 0) + + // get PCI devices + pciInfo, err := pci.New(context.WithContext(i.ctx)) + if err != nil { + i.ctx.Warn("error loading PCI information: %s", err) + return nil + } + + // Prepare hardware filter based in the PCI Class + Subclass + isAccelerator := func(dev *pci.Device) bool { + class := dev.Class.ID + subclass := dev.Subclass.ID + if subclasses, ok := acceleratorPCIClasses[class]; ok { + if slicesContains(subclasses, subclass) { + return true + } + } + return false + } + + // This loop iterates over the list of PCI devices and filters them based on discovery criteria + for _, device := range pciInfo.Devices { + if !isAccelerator(device) { + continue + } + accelDev := &AcceleratorDevice{ + Address: device.Address, + PCIDevice: device, + } + accelDevices = append(accelDevices, accelDev) + } + + i.Devices = accelDevices + return nil +} + +// TODO: delete and just use slices.Contains when the minimal golang version we support is 1.21 +func slicesContains(s []string, v string) bool { + for i := range s { + if v == s[i] { + return true + } + } + return false +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_stub.go b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_stub.go new file mode 100644 index 00000000..7e0b9fd4 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/accelerator/accelerator_stub.go @@ -0,0 +1,19 
@@ +//go:build !linux +// +build !linux + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package accelerator + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("accelerator.Info.load not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go new file mode 100644 index 00000000..ac4bf41a --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go @@ -0,0 +1,80 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package baseboard + +import ( + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/util" +) + +// Info defines baseboard release information +type Info struct { + ctx *context.Context + AssetTag string `json:"asset_tag"` + SerialNumber string `json:"serial_number"` + Vendor string `json:"vendor"` + Version string `json:"version"` + Product string `json:"product"` +} + +func (i *Info) String() string { + vendorStr := "" + if i.Vendor != "" { + vendorStr = " vendor=" + i.Vendor + } + serialStr := "" + if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN { + serialStr = " serial=" + i.SerialNumber + } + versionStr := "" + if i.Version != "" { + versionStr = " version=" + i.Version + } + + productStr := "" + if i.Product != "" { + productStr = " product=" + i.Product + } + + return "baseboard" + util.ConcatStrings( + vendorStr, + serialStr, + versionStr, + productStr, + ) +} + +// New returns a pointer to an Info struct containing information about the +// host's baseboard +func New(opts ...*option.Option) (*Info, error) { + ctx := context.New(opts...) + info := &Info{ctx: ctx} + if err := ctx.Do(info.load); err != nil { + return nil, err + } + return info, nil +} + +// simple private struct used to encapsulate baseboard information in a top-level +// "baseboard" YAML/JSON map/object key +type baseboardPrinter struct { + Info *Info `json:"baseboard"` +} + +// YAMLString returns a string with the baseboard information formatted as YAML +// under a top-level "dmi:" key +func (info *Info) YAMLString() string { + return marshal.SafeYAML(info.ctx, baseboardPrinter{info}) +} + +// JSONString returns a string with the baseboard information formatted as JSON +// under a top-level "baseboard:" key +func (info *Info) JSONString(indent bool) string { + return marshal.SafeJSON(info.ctx, baseboardPrinter{info}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go new file mode 100644 index 00000000..c8c598d4 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go @@ -0,0 +1,20 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
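For orientation, a consumer of the vendored accelerator package above (for instance, a hardware-capability probe in model-runner) would use it roughly as follows. A minimal sketch, assuming only the New/String APIs shown in this diff plus standard fmt/log imports:

	info, err := accelerator.New()
	if err != nil {
		// the non-Linux stub above makes New return an error on other platforms
		log.Fatal(err)
	}
	fmt.Println(info.String()) // e.g. "processing accelerators (1 device)"
	for _, dev := range info.Devices {
		fmt.Println(dev.String()) // falls back to the raw PCI address if PCIDevice is nil
	}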
+// + +package baseboard + +import ( + "github.com/jaypipes/ghw/pkg/linuxdmi" +) + +func (i *Info) load() error { + i.AssetTag = linuxdmi.Item(i.ctx, "board_asset_tag") + i.SerialNumber = linuxdmi.Item(i.ctx, "board_serial") + i.Vendor = linuxdmi.Item(i.ctx, "board_vendor") + i.Version = linuxdmi.Item(i.ctx, "board_version") + i.Product = linuxdmi.Item(i.ctx, "board_name") + + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go new file mode 100644 index 00000000..f5b14691 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package baseboard + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("baseboardFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go new file mode 100644 index 00000000..0fb14fbf --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go @@ -0,0 +1,37 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package baseboard + +import ( + "github.com/StackExchange/wmi" +) + +const wqlBaseboard = "SELECT Manufacturer, SerialNumber, Tag, Version, Product FROM Win32_BaseBoard" + +type win32Baseboard struct { + Manufacturer *string + SerialNumber *string + Tag *string + Version *string + Product *string +} + +func (i *Info) load() error { + // Getting data from WMI + var win32BaseboardDescriptions []win32Baseboard + if err := wmi.Query(wqlBaseboard, &win32BaseboardDescriptions); err != nil { + return err + } + if len(win32BaseboardDescriptions) > 0 { + i.AssetTag = *win32BaseboardDescriptions[0].Tag + i.SerialNumber = *win32BaseboardDescriptions[0].SerialNumber + i.Vendor = *win32BaseboardDescriptions[0].Manufacturer + i.Version = *win32BaseboardDescriptions[0].Version + i.Product = *win32BaseboardDescriptions[0].Product + } + + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go new file mode 100644 index 00000000..85a7c64b --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go @@ -0,0 +1,77 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
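One observation that applies to this and the other Windows loaders in this diff (baseboard above, BIOS and chassis below): WMI properties are surfaced as *string and may be nil, and the assignments above dereference them unchecked, which can panic on machines where WMI omits a field. A defensive helper along these lines (a suggestion, not part of upstream ghw) would avoid that:

	// strOrEmpty safely dereferences an optional WMI string property.
	func strOrEmpty(s *string) string {
		if s == nil {
			return ""
		}
		return *s
	}

	// e.g. i.AssetTag = strOrEmpty(win32BaseboardDescriptions[0].Tag)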
+// + +package bios + +import ( + "fmt" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/util" +) + +// Info defines BIOS release information +type Info struct { + ctx *context.Context + Vendor string `json:"vendor"` + Version string `json:"version"` + Date string `json:"date"` +} + +func (i *Info) String() string { + + vendorStr := "" + if i.Vendor != "" { + vendorStr = " vendor=" + i.Vendor + } + versionStr := "" + if i.Version != "" { + versionStr = " version=" + i.Version + } + dateStr := "" + if i.Date != "" && i.Date != util.UNKNOWN { + dateStr = " date=" + i.Date + } + + res := fmt.Sprintf( + "bios%s%s%s", + vendorStr, + versionStr, + dateStr, + ) + return res +} + +// New returns a pointer to a Info struct containing information +// about the host's BIOS +func New(opts ...*option.Option) (*Info, error) { + ctx := context.New(opts...) + info := &Info{ctx: ctx} + if err := ctx.Do(info.load); err != nil { + return nil, err + } + return info, nil +} + +// simple private struct used to encapsulate BIOS information in a top-level +// "bios" YAML/JSON map/object key +type biosPrinter struct { + Info *Info `json:"bios"` +} + +// YAMLString returns a string with the BIOS information formatted as YAML +// under a top-level "dmi:" key +func (info *Info) YAMLString() string { + return marshal.SafeYAML(info.ctx, biosPrinter{info}) +} + +// JSONString returns a string with the BIOS information formatted as JSON +// under a top-level "bios:" key +func (info *Info) JSONString(indent bool) string { + return marshal.SafeJSON(info.ctx, biosPrinter{info}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go new file mode 100644 index 00000000..9788f4f7 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go @@ -0,0 +1,16 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package bios + +import "github.com/jaypipes/ghw/pkg/linuxdmi" + +func (i *Info) load() error { + i.Vendor = linuxdmi.Item(i.ctx, "bios_vendor") + i.Version = linuxdmi.Item(i.ctx, "bios_version") + i.Date = linuxdmi.Item(i.ctx, "bios_date") + + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go new file mode 100644 index 00000000..5307b4a0 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package bios + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("biosFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go new file mode 100644 index 00000000..778628e9 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go @@ -0,0 +1,32 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
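The Linux loaders in this diff (baseboard above, BIOS below, chassis later) all delegate to linuxdmi.Item, which is not itself part of this diff. As a rough mental model — an assumption based on the kernel's sysfs DMI interface, not on the vendored source — it behaves like:

	// dmiItem is an illustrative stand-in for linuxdmi.Item: read a single
	// DMI attribute exported by the kernel and trim surrounding whitespace.
	func dmiItem(item string) string {
		raw, err := os.ReadFile(filepath.Join("/sys/class/dmi/id", item))
		if err != nil {
			return "unknown" // ghw uses util.UNKNOWN for this
		}
		return strings.TrimSpace(string(raw))
	}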
+// + +package bios + +import ( + "github.com/StackExchange/wmi" +) + +const wqlBIOS = "SELECT InstallDate, Manufacturer, Version FROM CIM_BIOSElement" + +type win32BIOS struct { + InstallDate *string + Manufacturer *string + Version *string +} + +func (i *Info) load() error { + // Getting data from WMI + var win32BIOSDescriptions []win32BIOS + if err := wmi.Query(wqlBIOS, &win32BIOSDescriptions); err != nil { + return err + } + if len(win32BIOSDescriptions) > 0 { + i.Vendor = *win32BIOSDescriptions[0].Manufacturer + i.Version = *win32BIOSDescriptions[0].Version + i.Date = *win32BIOSDescriptions[0].InstallDate + } + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block.go b/vendor/github.com/jaypipes/ghw/pkg/block/block.go new file mode 100644 index 00000000..5e75eea6 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block.go @@ -0,0 +1,416 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package block + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/unitutil" + "github.com/jaypipes/ghw/pkg/util" +) + +// DriveType describes the general category of drive device +type DriveType int + +const ( + // DriveTypeUnknown means we could not determine the drive type of the disk + DriveTypeUnknown DriveType = iota + // DriveTypeHDD indicates a hard disk drive + DriveTypeHDD + // DriveTypeFDD indicates a floppy disk drive + DriveTypeFDD + // DriveTypeODD indicates an optical disk drive + DriveTypeODD + // DriveTypeSSD indicates a solid-state drive + DriveTypeSSD + // DriveTypeVirtual indicates a virtual drive i.e. loop devices + DriveTypeVirtual +) + +const ( + // DEPRECATED: Please use DriveTypeUnknown + DRIVE_TYPE_UNKNOWN = DriveTypeUnknown + // DEPRECATED: Please use DriveTypeHDD + DRIVE_TYPE_HDD = DriveTypeHDD + // DEPRECATED: Please use DriveTypeFDD + DRIVE_TYPE_FDD = DriveTypeFDD + // DEPRECATED: Please use DriveTypeODD + DRIVE_TYPE_ODD = DriveTypeODD + // DEPRECATED: Please use DriveTypeSSD + DRIVE_TYPE_SSD = DriveTypeSSD + // DEPRECATED: Please use DriveTypeVirtual + DRIVE_TYPE_VIRTUAL = DriveTypeVirtual +) + +var ( + driveTypeString = map[DriveType]string{ + DriveTypeUnknown: "Unknown", + DriveTypeHDD: "HDD", + DriveTypeFDD: "FDD", + DriveTypeODD: "ODD", + DriveTypeSSD: "SSD", + DriveTypeVirtual: "virtual", + } + + // NOTE(fromani): the keys are all lowercase and do not match + // the keys in the opposite table `driveTypeString`. + // This is done because of the choice we made in + // DriveType::MarshalJSON. + // We use this table only in UnmarshalJSON, so it should be OK. 
+ stringDriveType = map[string]DriveType{ + "unknown": DriveTypeUnknown, + "hdd": DriveTypeHDD, + "fdd": DriveTypeFDD, + "odd": DriveTypeODD, + "ssd": DriveTypeSSD, + "virtual": DriveTypeVirtual, + } +) + +func (dt DriveType) String() string { + return driveTypeString[dt] +} + +// NOTE(jaypipes): since serialized output is as "official" as we're going to +// get, let's lowercase the string output when serializing, in order to +// "normalize" the expected serialized output +func (dt DriveType) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(strings.ToLower(dt.String()))), nil +} + +func (dt *DriveType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + key := strings.ToLower(s) + val, ok := stringDriveType[key] + if !ok { + return fmt.Errorf("unknown drive type: %q", key) + } + *dt = val + return nil +} + +// StorageController is a category of block storage controller/driver. It +// represents more of the physical hardware interface than the storage +// protocol, which represents more of the software interface. +// +// See discussion on https://github.com/jaypipes/ghw/issues/117 +type StorageController int + +const ( + // StorageControllerUnknown indicates we could not determine the storage + // controller for the disk + StorageControllerUnknown StorageController = iota + // StorageControllerIDE indicates a Integrated Drive Electronics (IDE) + // controller + StorageControllerIDE + // StorageControllerSCSI indicates a Small computer system interface + // (SCSI) controller + StorageControllerSCSI + // StorageControllerNVMe indicates a Non-volatile Memory Express (NVMe) + // controller + StorageControllerNVMe + // StorageControllerVirtIO indicates a virtualized storage + // controller/driver + StorageControllerVirtIO + // StorageControllerMMC indicates a Multi-media controller (used for mobile + // phone storage devices) + StorageControllerMMC + // StorageControllerLoop indicates a loopback storage controller + StorageControllerLoop +) + +const ( + // DEPRECATED: Please use StorageControllerUnknown + STORAGE_CONTROLLER_UNKNOWN = StorageControllerUnknown + // DEPRECATED: Please use StorageControllerIDE + STORAGE_CONTROLLER_IDE = StorageControllerIDE + // DEPRECATED: Please use StorageControllerSCSI + STORAGE_CONTROLLER_SCSI = StorageControllerSCSI + // DEPRECATED: Please use StorageControllerNVMe + STORAGE_CONTROLLER_NVME = StorageControllerNVMe + // DEPRECATED: Please use StorageControllerVirtIO + STORAGE_CONTROLLER_VIRTIO = StorageControllerVirtIO + // DEPRECATED: Please use StorageControllerMMC + STORAGE_CONTROLLER_MMC = StorageControllerMMC + // DEPRECATED: Please use StorageControllerLoop + STORAGE_CONTROLLER_LOOP = StorageControllerLoop +) + +var ( + storageControllerString = map[StorageController]string{ + StorageControllerUnknown: "Unknown", + StorageControllerIDE: "IDE", + StorageControllerSCSI: "SCSI", + StorageControllerNVMe: "NVMe", + StorageControllerVirtIO: "virtio", + StorageControllerMMC: "MMC", + StorageControllerLoop: "loop", + } + + // NOTE(fromani): the keys are all lowercase and do not match + // the keys in the opposite table `storageControllerString`. + // This is done/ because of the choice we made in + // StorageController::MarshalJSON. + // We use this table only in UnmarshalJSON, so it should be OK. 
+ stringStorageController = map[string]StorageController{ + "unknown": StorageControllerUnknown, + "ide": StorageControllerIDE, + "scsi": StorageControllerSCSI, + "nvme": StorageControllerNVMe, + "virtio": StorageControllerVirtIO, + "mmc": StorageControllerMMC, + "loop": StorageControllerLoop, + } +) + +func (sc StorageController) String() string { + return storageControllerString[sc] +} + +func (sc *StorageController) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + key := strings.ToLower(s) + val, ok := stringStorageController[key] + if !ok { + return fmt.Errorf("unknown storage controller: %q", key) + } + *sc = val + return nil +} + +// NOTE(jaypipes): since serialized output is as "official" as we're going to +// get, let's lowercase the string output when serializing, in order to +// "normalize" the expected serialized output +func (sc StorageController) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(strings.ToLower(sc.String()))), nil +} + +// Disk describes a single disk drive on the host system. Disk drives provide +// raw block storage resources. +type Disk struct { + // Name contains a short name for the disk, e.g. `sda` + Name string `json:"name"` + // SizeBytes contains the total amount of storage, in bytes, for this disk + SizeBytes uint64 `json:"size_bytes"` + // PhysicalBlockSizeBytes is the size, in bytes, of the physical blocks in + // this disk. This is typically the minimum amount of data that can be + // written to a disk in a single write operation. + PhysicalBlockSizeBytes uint64 `json:"physical_block_size_bytes"` + // DriveType is the category of disk drive for this disk. + DriveType DriveType `json:"drive_type"` + // IsRemovable indicates if the disk drive is removable. + IsRemovable bool `json:"removable"` + // StorageController is the category of storage controller used by the + // disk. + StorageController StorageController `json:"storage_controller"` + // BusPath is the filepath to the bus for this disk. + BusPath string `json:"bus_path"` + // NUMANodeID contains the numeric index (0-based) of the NUMA Node this + // disk is affined to, or -1 if the host system is non-NUMA. + // TODO(jaypipes): Convert this to a TopologyNode struct pointer and then + // add to serialized output as "numa_node,omitempty" + NUMANodeID int `json:"-"` + // Vendor is the manufacturer of the disk. + Vendor string `json:"vendor"` + // Model is the model number of the disk. + Model string `json:"model"` + // SerialNumber is the serial number of the disk. + SerialNumber string `json:"serial_number"` + // WWN is the World-wide Name of the disk. + // See: https://en.wikipedia.org/wiki/World_Wide_Name + WWN string `json:"wwn"` + // WWNNoExtension is the World-wide Name of the disk with any vendor + // extensions excluded. + // See: https://en.wikipedia.org/wiki/World_Wide_Name + WWNNoExtension string `json:"wwnNoExtension"` + // Partitions contains an array of pointers to `Partition` structs, one for + // each partition on the disk. + Partitions []*Partition `json:"partitions"` + // TODO(jaypipes): Add PCI field for accessing PCI device information + // PCI *PCIDevice `json:"pci"` +} + +// Partition describes a logical division of a Disk. +type Partition struct { + // Disk is a pointer to the `Disk` struct that houses this partition. + Disk *Disk `json:"-"` + // Name is the system name given to the partition, e.g. "sda1". + Name string `json:"name"` + // Label is the human-readable label given to the partition. 
On Linux, this + // is derived from the `ID_PART_ENTRY_NAME` udev entry. + Label string `json:"label"` + // MountPoint is the path where this partition is mounted. + MountPoint string `json:"mount_point"` + // SizeBytes contains the total amount of storage, in bytes, this partition + // can consume. + SizeBytes uint64 `json:"size_bytes"` + // Type contains the type of the partition. + Type string `json:"type"` + // IsReadOnly indicates if the partition is marked read-only. + IsReadOnly bool `json:"read_only"` + // UUID is the universally-unique identifier (UUID) for the partition. + // This will be volume UUID on Darwin, PartUUID on linux, empty on Windows. + UUID string `json:"uuid"` + // FilesystemLabel is the label of the filesystem contained on the + // partition. On Linux, this is derived from the `ID_FS_NAME` udev entry. + FilesystemLabel string `json:"filesystem_label"` +} + +// Info describes all disk drives and partitions in the host system. +type Info struct { + ctx *context.Context + // TotalSizeBytes contains the total amount of storage, in bytes, on the + // host system. + TotalSizeBytes uint64 `json:"total_size_bytes"` + // DEPRECATED: Please use TotalSizeBytes + TotalPhysicalBytes uint64 `json:"-"` + // Disks contains an array of pointers to `Disk` structs, one for each disk + // drive on the host system. + Disks []*Disk `json:"disks"` + // Partitions contains an array of pointers to `Partition` structs, one for + // each partition on any disk drive on the host system. + Partitions []*Partition `json:"-"` +} + +// New returns a pointer to an Info struct that describes the block storage +// resources of the host system. +func New(opts ...*option.Option) (*Info, error) { + ctx := context.New(opts...) + info := &Info{ctx: ctx} + if err := ctx.Do(info.load); err != nil { + return nil, err + } + return info, nil +} + +// String returns a short string indicating important information about the +// block storage on the host system. +func (i *Info) String() string { + tpbs := util.UNKNOWN + if i.TotalPhysicalBytes > 0 { + tpb := i.TotalPhysicalBytes + unit, unitStr := unitutil.AmountString(int64(tpb)) + tpb = uint64(math.Ceil(float64(tpb) / float64(unit))) + tpbs = fmt.Sprintf("%d%s", tpb, unitStr) + } + dplural := "disks" + if len(i.Disks) == 1 { + dplural = "disk" + } + return fmt.Sprintf("block storage (%d %s, %s physical storage)", + len(i.Disks), dplural, tpbs) +} + +// String returns a short string indicating important information about the +// disk. 
+func (d *Disk) String() string { + sizeStr := util.UNKNOWN + if d.SizeBytes > 0 { + size := d.SizeBytes + unit, unitStr := unitutil.AmountString(int64(size)) + size = uint64(math.Ceil(float64(size) / float64(unit))) + sizeStr = fmt.Sprintf("%d%s", size, unitStr) + } + atNode := "" + if d.NUMANodeID >= 0 { + atNode = fmt.Sprintf(" (node #%d)", d.NUMANodeID) + } + vendor := "" + if d.Vendor != "" { + vendor = " vendor=" + d.Vendor + } + model := "" + if d.Model != util.UNKNOWN { + model = " model=" + d.Model + } + serial := "" + if d.SerialNumber != util.UNKNOWN { + serial = " serial=" + d.SerialNumber + } + wwn := "" + if d.WWN != util.UNKNOWN { + wwn = " WWN=" + d.WWN + } + removable := "" + if d.IsRemovable { + removable = " removable=true" + } + return fmt.Sprintf( + "%s %s (%s) %s [@%s%s]%s", + d.Name, + d.DriveType.String(), + sizeStr, + d.StorageController.String(), + d.BusPath, + atNode, + util.ConcatStrings( + vendor, + model, + serial, + wwn, + removable, + ), + ) +} + +// String returns a short string indicating important information about the +// partition. +func (p *Partition) String() string { + typeStr := "" + if p.Type != "" { + typeStr = fmt.Sprintf("[%s]", p.Type) + } + mountStr := "" + if p.MountPoint != "" { + mountStr = fmt.Sprintf(" mounted@%s", p.MountPoint) + } + sizeStr := util.UNKNOWN + if p.SizeBytes > 0 { + size := p.SizeBytes + unit, unitStr := unitutil.AmountString(int64(size)) + size = uint64(math.Ceil(float64(size) / float64(unit))) + sizeStr = fmt.Sprintf("%d%s", size, unitStr) + } + return fmt.Sprintf( + "%s (%s) %s%s", + p.Name, + sizeStr, + typeStr, + mountStr, + ) +} + +// simple private struct used to encapsulate block information in a top-level +// "block" YAML/JSON map/object key +type blockPrinter struct { + Info *Info `json:"block" yaml:"block"` +} + +// YAMLString returns a string with the block information formatted as YAML +// under a top-level "block:" key +func (i *Info) YAMLString() string { + return marshal.SafeYAML(i.ctx, blockPrinter{i}) +} + +// JSONString returns a string with the block information formatted as JSON +// under a top-level "block:" key +func (i *Info) JSONString(indent bool) string { + return marshal.SafeJSON(i.ctx, blockPrinter{i}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go new file mode 100644 index 00000000..c6b6c266 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go @@ -0,0 +1,290 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
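Given the lowercase-on-marshal convention called out in the NOTE comments above, DriveType and StorageController round-trip through JSON as follows (illustrative sketch from a consuming package, with encoding/json and fmt imported):

	out, _ := json.Marshal(block.DriveTypeSSD) // yields `"ssd"`
	var dt block.DriveType
	_ = json.Unmarshal([]byte(`"SSD"`), &dt) // keys are lowercased first, so case is irrelevant
	fmt.Println(string(out), dt) // prints: "ssd" SSD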
+// + +package block + +import ( + "fmt" + "os" + "os/exec" + "path" + "strings" + + "github.com/pkg/errors" + "howett.net/plist" +) + +type diskOrPartitionPlistNode struct { + Content string + DeviceIdentifier string + DiskUUID string + VolumeName string + VolumeUUID string + Size int64 + MountPoint string + Partitions []diskOrPartitionPlistNode + APFSVolumes []diskOrPartitionPlistNode +} + +type diskUtilListPlist struct { + AllDisks []string + AllDisksAndPartitions []diskOrPartitionPlistNode + VolumesFromDisks []string + WholeDisks []string +} + +type diskUtilInfoPlist struct { + AESHardware bool // true + Bootable bool // true + BooterDeviceIdentifier string // disk1s2 + BusProtocol string // PCI-Express + CanBeMadeBootable bool // false + CanBeMadeBootableRequiresDestroy bool // false + Content string // some-uuid-foo-bar + DeviceBlockSize int64 // 4096 + DeviceIdentifier string // disk1s1 + DeviceNode string // /dev/disk1s1 + DeviceTreePath string // IODeviceTree:/PCI0@0/RP17@1B/ANS2@0/AppleANS2Controller + DiskUUID string // some-uuid-foo-bar + Ejectable bool // false + EjectableMediaAutomaticUnderSoftwareControl bool // false + EjectableOnly bool // false + FilesystemName string // APFS + FilesystemType string // apfs + FilesystemUserVisibleName string // APFS + FreeSpace int64 // 343975677952 + GlobalPermissionsEnabled bool // true + IOKitSize int64 // 499963174912 + IORegistryEntryName string // Macintosh HD + Internal bool // true + MediaName string // + MediaType string // Generic + MountPoint string // / + ParentWholeDisk string // disk1 + PartitionMapPartition bool // false + RAIDMaster bool // false + RAIDSlice bool // false + RecoveryDeviceIdentifier string // disk1s3 + Removable bool // false + RemovableMedia bool // false + RemovableMediaOrExternalDevice bool // false + SMARTStatus string // Verified + Size int64 // 499963174912 + SolidState bool // true + SupportsGlobalPermissionsDisable bool // true + SystemImage bool // false + TotalSize int64 // 499963174912 + VolumeAllocationBlockSize int64 // 4096 + VolumeName string // Macintosh HD + VolumeSize int64 // 499963174912 + VolumeUUID string // some-uuid-foo-bar + WholeDisk bool // false + Writable bool // true + WritableMedia bool // true + WritableVolume bool // true + // also has a SMARTDeviceSpecificKeysMayVaryNotGuaranteed dict with various info + // NOTE: VolumeUUID sometimes == DiskUUID, but not always. So far Content is always a different UUID. +} + +type ioregPlist struct { + // there's a lot more than just this... 
+ ModelNumber string `plist:"Model Number"` + SerialNumber string `plist:"Serial Number"` + VendorName string `plist:"Vendor Name"` +} + +func getDiskUtilListPlist() (*diskUtilListPlist, error) { + out, err := exec.Command("diskutil", "list", "-plist").Output() + if err != nil { + return nil, errors.Wrap(err, "diskutil list failed") + } + + var data diskUtilListPlist + if _, err := plist.Unmarshal(out, &data); err != nil { + return nil, errors.Wrap(err, "diskutil list plist unmarshal failed") + } + + return &data, nil +} + +func getDiskUtilInfoPlist(device string) (*diskUtilInfoPlist, error) { + out, err := exec.Command("diskutil", "info", "-plist", device).Output() + if err != nil { + return nil, errors.Wrapf(err, "diskutil info for %q failed", device) + } + + var data diskUtilInfoPlist + if _, err := plist.Unmarshal(out, &data); err != nil { + return nil, errors.Wrapf(err, "diskutil info plist unmarshal for %q failed", device) + } + + return &data, nil +} + +func getIoregPlist(ioDeviceTreePath string) (*ioregPlist, error) { + name := path.Base(ioDeviceTreePath) + + args := []string{ + "ioreg", + "-a", // use XML output + "-d", "1", // limit device tree output depth to root node + "-r", // root device tree at matched node + "-n", name, // match by name + } + out, err := exec.Command(args[0], args[1:]...).Output() + if err != nil { + return nil, errors.Wrapf(err, "ioreg query for %q failed", ioDeviceTreePath) + } + if out == nil || len(out) == 0 { + return nil, nil + } + + var data []ioregPlist + if _, err := plist.Unmarshal(out, &data); err != nil { + return nil, errors.Wrapf(err, "ioreg unmarshal for %q failed", ioDeviceTreePath) + } + if len(data) != 1 { + err := errors.Errorf("ioreg unmarshal resulted in %d I/O device tree nodes (expected 1)", len(data)) + return nil, err + } + + return &data[0], nil +} + +func makePartition(disk, s diskOrPartitionPlistNode, isAPFS bool) (*Partition, error) { + if s.Size < 0 { + return nil, errors.Errorf("invalid size %q of partition %q", s.Size, s.DeviceIdentifier) + } + + var partType string + if isAPFS { + partType = "APFS Volume" + } else { + partType = s.Content + } + + info, err := getDiskUtilInfoPlist(s.DeviceIdentifier) + if err != nil { + return nil, err + } + + return &Partition{ + Disk: nil, // filled in later + Name: s.DeviceIdentifier, + Label: s.VolumeName, + MountPoint: s.MountPoint, + SizeBytes: uint64(s.Size), + Type: partType, + IsReadOnly: !info.WritableVolume, + UUID: s.VolumeUUID, + }, nil +} + +// driveTypeFromPlist looks at the supplied property list struct and attempts to +// determine the disk type +func driveTypeFromPlist(infoPlist *diskUtilInfoPlist) DriveType { + dt := DriveTypeHDD + if infoPlist.SolidState { + dt = DriveTypeSSD + } + // TODO(jaypipes): Figure out how to determine floppy and/or CD/optical + // drive type on Mac + return dt +} + +// storageControllerFromPlist looks at the supplied property list struct and +// attempts to determine the storage controller in use for the device +func storageControllerFromPlist(infoPlist *diskUtilInfoPlist) StorageController { + sc := StorageControllerSCSI + if strings.HasSuffix(infoPlist.DeviceTreePath, "IONVMeController") { + sc = StorageControllerNVMe + } + // TODO(jaypipes): I don't know if Mac even supports IDE controllers and + // the "virtio" controller is libvirt-specific + return sc +} + +func (info *Info) load() error { + if !info.ctx.EnableTools { + return fmt.Errorf("EnableTools=false on darwin disables block support entirely.") + } + + listPlist, err := 
getDiskUtilListPlist() + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + return err + } + + var tsb uint64 + info.Disks = make([]*Disk, 0, len(listPlist.AllDisksAndPartitions)) + info.Partitions = []*Partition{} + + for _, disk := range listPlist.AllDisksAndPartitions { + if disk.Size < 0 { + return errors.Errorf("invalid size %q of disk %q", disk.Size, disk.DeviceIdentifier) + } + + infoPlist, err := getDiskUtilInfoPlist(disk.DeviceIdentifier) + if err != nil { + return err + } + if infoPlist.DeviceBlockSize < 0 { + return errors.Errorf("invalid block size %q of disk %q", infoPlist.DeviceBlockSize, disk.DeviceIdentifier) + } + + busPath := strings.TrimPrefix(infoPlist.DeviceTreePath, "IODeviceTree:") + + ioregPlist, err := getIoregPlist(infoPlist.DeviceTreePath) + if err != nil { + return err + } + if ioregPlist == nil { + continue + } + + // The NUMA node & WWN don't seem to be reported by any tools available by default in macOS. + diskReport := &Disk{ + Name: disk.DeviceIdentifier, + SizeBytes: uint64(disk.Size), + PhysicalBlockSizeBytes: uint64(infoPlist.DeviceBlockSize), + DriveType: driveTypeFromPlist(infoPlist), + IsRemovable: infoPlist.Removable, + StorageController: storageControllerFromPlist(infoPlist), + BusPath: busPath, + NUMANodeID: -1, + Vendor: ioregPlist.VendorName, + Model: ioregPlist.ModelNumber, + SerialNumber: ioregPlist.SerialNumber, + WWN: "", + WWNNoExtension: "", + Partitions: make([]*Partition, 0, len(disk.Partitions)+len(disk.APFSVolumes)), + } + + for _, partition := range disk.Partitions { + part, err := makePartition(disk, partition, false) + if err != nil { + return err + } + part.Disk = diskReport + diskReport.Partitions = append(diskReport.Partitions, part) + } + for _, volume := range disk.APFSVolumes { + part, err := makePartition(disk, volume, true) + if err != nil { + return err + } + part.Disk = diskReport + diskReport.Partitions = append(diskReport.Partitions, part) + } + + tsb += uint64(disk.Size) + info.Disks = append(info.Disks, diskReport) + info.Partitions = append(info.Partitions, diskReport.Partitions...) + } + info.TotalSizeBytes = tsb + info.TotalPhysicalBytes = tsb + + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go new file mode 100644 index 00000000..3542bd72 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go @@ -0,0 +1,525 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
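To summarize the Darwin implementation above: load() shells out once to diskutil list -plist, then to diskutil info -plist and ioreg per device, decoding each payload with the vendored howett.net/plist package. The decode path can be exercised in isolation like this (sketch, same package, with os/exec and fmt imported):

	out, err := exec.Command("diskutil", "list", "-plist").Output()
	if err == nil {
		var listData diskUtilListPlist
		_, _ = plist.Unmarshal(out, &listData)
		fmt.Println(listData.AllDisks) // e.g. [disk0 disk0s1 disk1 disk1s1]
	}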
+// + +package block + +import ( + "bufio" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/linuxpath" + "github.com/jaypipes/ghw/pkg/util" +) + +const ( + sectorSize = 512 +) + +func (i *Info) load() error { + paths := linuxpath.New(i.ctx) + i.Disks = disks(i.ctx, paths) + var tsb uint64 + for _, d := range i.Disks { + tsb += d.SizeBytes + } + i.TotalSizeBytes = tsb + i.TotalPhysicalBytes = tsb + return nil +} + +func diskPhysicalBlockSizeBytes(paths *linuxpath.Paths, disk string) uint64 { + // We can find the sector size in Linux by looking at the + // /sys/block/$DEVICE/queue/physical_block_size file in sysfs + path := filepath.Join(paths.SysBlock, disk, "queue", "physical_block_size") + contents, err := os.ReadFile(path) + if err != nil { + return 0 + } + size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64) + if err != nil { + return 0 + } + return size +} + +func diskSizeBytes(paths *linuxpath.Paths, disk string) uint64 { + // We can find the number of 512-byte sectors by examining the contents of + // /sys/block/$DEVICE/size and calculate the physical bytes accordingly. + path := filepath.Join(paths.SysBlock, disk, "size") + contents, err := os.ReadFile(path) + if err != nil { + return 0 + } + size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64) + if err != nil { + return 0 + } + return size * sectorSize +} + +func diskNUMANodeID(paths *linuxpath.Paths, disk string) int { + link, err := os.Readlink(filepath.Join(paths.SysBlock, disk)) + if err != nil { + return -1 + } + for partial := link; strings.HasPrefix(partial, "../devices/"); partial = filepath.Dir(partial) { + if nodeContents, err := os.ReadFile(filepath.Join(paths.SysBlock, partial, "numa_node")); err == nil { + if nodeInt, err := strconv.Atoi(strings.TrimSpace(string(nodeContents))); err == nil { + return nodeInt + } + } + } + return -1 +} + +func diskVendor(paths *linuxpath.Paths, disk string) string { + // In Linux, the vendor for a disk device is found in the + // /sys/block/$DEVICE/device/vendor file in sysfs + path := filepath.Join(paths.SysBlock, disk, "device", "vendor") + contents, err := os.ReadFile(path) + if err != nil { + return util.UNKNOWN + } + return strings.TrimSpace(string(contents)) +} + +// udevInfoDisk gets the udev info for a disk +func udevInfoDisk(paths *linuxpath.Paths, disk string) (map[string]string, error) { + // Get device major:minor numbers + devNo, err := os.ReadFile(filepath.Join(paths.SysBlock, disk, "dev")) + if err != nil { + return nil, err + } + return udevInfo(paths, string(devNo)) +} + +// udevInfoPartition gets the udev info for a partition +func udevInfoPartition(paths *linuxpath.Paths, disk string, partition string) (map[string]string, error) { + // Get device major:minor numbers + devNo, err := os.ReadFile(filepath.Join(paths.SysBlock, disk, partition, "dev")) + if err != nil { + return nil, err + } + return udevInfo(paths, string(devNo)) +} + +func udevInfo(paths *linuxpath.Paths, devNo string) (map[string]string, error) { + // Look up block device in udev runtime database + udevID := "b" + strings.TrimSpace(devNo) + udevBytes, err := os.ReadFile(filepath.Join(paths.RunUdevData, udevID)) + if err != nil { + return nil, err + } + + udevInfo := make(map[string]string) + for _, udevLine := range strings.Split(string(udevBytes), "\n") { + if strings.HasPrefix(udevLine, "E:") { + if s := strings.SplitN(udevLine[2:], "=", 2); len(s) == 2 { + udevInfo[s[0]] = s[1] + } + } + } + 
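For reference, the udev runtime database entries parsed here live at /run/udev/data/b<major>:<minor>, one per block device, and look like the following (sample values, abbreviated), which is why only the E:-prefixed key=value lines are kept:

	E:ID_MODEL=Samsung_SSD_970_EVO
	E:ID_SERIAL_SHORT=S466NX0K123456
	E:ID_PATH=pci-0000:00:17.0-ata-1
	E:ID_WWN=0x5002538e403b58b9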
return udevInfo, nil +} + +func diskModel(paths *linuxpath.Paths, disk string) string { + info, err := udevInfoDisk(paths, disk) + if err != nil { + return util.UNKNOWN + } + + if model, ok := info["ID_MODEL"]; ok { + return model + } + return util.UNKNOWN +} + +func diskSerialNumber(paths *linuxpath.Paths, disk string) string { + info, err := udevInfoDisk(paths, disk) + if err != nil { + return util.UNKNOWN + } + + // First try to use the serial from sg3_utils + if serial, ok := info["SCSI_IDENT_SERIAL"]; ok { + return serial + } + + // Fall back to ID_SCSI_SERIAL + if serial, ok := info["ID_SCSI_SERIAL"]; ok { + return serial + } + + // There are two serial number keys, ID_SERIAL and ID_SERIAL_SHORT The + // non-_SHORT version often duplicates vendor information collected + // elsewhere, so use _SHORT and fall back to ID_SERIAL if missing... + if serial, ok := info["ID_SERIAL_SHORT"]; ok { + return serial + } + if serial, ok := info["ID_SERIAL"]; ok { + return serial + } + return util.UNKNOWN +} + +func diskBusPath(paths *linuxpath.Paths, disk string) string { + info, err := udevInfoDisk(paths, disk) + if err != nil { + return util.UNKNOWN + } + + // There are two path keys, ID_PATH and ID_PATH_TAG. + // The difference seems to be _TAG has funky characters converted to underscores. + if path, ok := info["ID_PATH"]; ok { + return path + } + return util.UNKNOWN +} + +func diskWWNNoExtension(paths *linuxpath.Paths, disk string) string { + info, err := udevInfoDisk(paths, disk) + if err != nil { + return util.UNKNOWN + } + + if wwn, ok := info["ID_WWN"]; ok { + return wwn + } + return util.UNKNOWN +} + +func diskWWN(paths *linuxpath.Paths, disk string) string { + info, err := udevInfoDisk(paths, disk) + if err != nil { + return util.UNKNOWN + } + + // Trying ID_WWN_WITH_EXTENSION and falling back to ID_WWN is the same logic lsblk uses + if wwn, ok := info["ID_WWN_WITH_EXTENSION"]; ok { + return wwn + } + if wwn, ok := info["ID_WWN"]; ok { + return wwn + } + // Device Mapper devices get DM_WWN instead of ID_WWN_WITH_EXTENSION + if wwn, ok := info["DM_WWN"]; ok { + return wwn + } + return util.UNKNOWN +} + +// diskPartitions takes the name of a disk (note: *not* the path of the disk, +// but just the name. 
In other words, "sda", not "/dev/sda" and "nvme0n1" not +// "/dev/nvme0n1") and returns a slice of pointers to Partition structs +// representing the partitions in that disk +func diskPartitions(ctx *context.Context, paths *linuxpath.Paths, disk string) []*Partition { + out := make([]*Partition, 0) + path := filepath.Join(paths.SysBlock, disk) + files, err := os.ReadDir(path) + if err != nil { + ctx.Warn("failed to read disk partitions: %s\n", err) + return out + } + for _, file := range files { + fname := file.Name() + if !strings.HasPrefix(fname, disk) { + continue + } + size := partitionSizeBytes(paths, disk, fname) + mp, pt, ro := partitionInfo(paths, fname) + du := diskPartUUID(paths, disk, fname) + label := diskPartLabel(paths, disk, fname) + if pt == "" { + pt = diskPartTypeUdev(paths, disk, fname) + } + fsLabel := diskFSLabel(paths, disk, fname) + p := &Partition{ + Name: fname, + SizeBytes: size, + MountPoint: mp, + Type: pt, + IsReadOnly: ro, + UUID: du, + Label: label, + FilesystemLabel: fsLabel, + } + out = append(out, p) + } + return out +} + +func diskFSLabel(paths *linuxpath.Paths, disk string, partition string) string { + info, err := udevInfoPartition(paths, disk, partition) + if err != nil { + return util.UNKNOWN + } + + if label, ok := info["ID_FS_LABEL"]; ok { + return label + } + return util.UNKNOWN +} + +func diskPartLabel(paths *linuxpath.Paths, disk string, partition string) string { + info, err := udevInfoPartition(paths, disk, partition) + if err != nil { + return util.UNKNOWN + } + + if label, ok := info["ID_PART_ENTRY_NAME"]; ok { + return label + } + return util.UNKNOWN +} + +// diskPartTypeUdev gets the partition type from the udev database directly and its only used as fallback when +// the partition is not mounted, so we cannot get the type from paths.ProcMounts from the partitionInfo function +func diskPartTypeUdev(paths *linuxpath.Paths, disk string, partition string) string { + info, err := udevInfoPartition(paths, disk, partition) + if err != nil { + return util.UNKNOWN + } + + if pType, ok := info["ID_FS_TYPE"]; ok { + return pType + } + return util.UNKNOWN +} + +func diskPartUUID(paths *linuxpath.Paths, disk string, partition string) string { + info, err := udevInfoPartition(paths, disk, partition) + if err != nil { + return util.UNKNOWN + } + + if pType, ok := info["ID_PART_ENTRY_UUID"]; ok { + return pType + } + return util.UNKNOWN +} + +func diskIsRemovable(paths *linuxpath.Paths, disk string) bool { + path := filepath.Join(paths.SysBlock, disk, "removable") + contents, err := os.ReadFile(path) + if err != nil { + return false + } + removable := strings.TrimSpace(string(contents)) + return removable == "1" +} + +func disks(ctx *context.Context, paths *linuxpath.Paths) []*Disk { + // In Linux, we could use the fdisk, lshw or blockdev commands to list disk + // information, however all of these utilities require root privileges to + // run. 
We can get all of this information by examining the /sys/block + // and /sys/class/block files + disks := make([]*Disk, 0) + files, err := os.ReadDir(paths.SysBlock) + if err != nil { + return nil + } + for _, file := range files { + dname := file.Name() + + driveType, storageController := diskTypes(dname) + // TODO(jaypipes): Move this into diskTypes() once abstracting + // diskIsRotational for ease of unit testing + if !diskIsRotational(ctx, paths, dname) { + driveType = DRIVE_TYPE_SSD + } + size := diskSizeBytes(paths, dname) + pbs := diskPhysicalBlockSizeBytes(paths, dname) + busPath := diskBusPath(paths, dname) + node := diskNUMANodeID(paths, dname) + vendor := diskVendor(paths, dname) + model := diskModel(paths, dname) + serialNo := diskSerialNumber(paths, dname) + wwn := diskWWN(paths, dname) + wwnNoExtension := diskWWNNoExtension(paths, dname) + removable := diskIsRemovable(paths, dname) + + if storageController == STORAGE_CONTROLLER_LOOP && size == 0 { + // We don't care about unused loop devices... + continue + } + d := &Disk{ + Name: dname, + SizeBytes: size, + PhysicalBlockSizeBytes: pbs, + DriveType: driveType, + IsRemovable: removable, + StorageController: storageController, + BusPath: busPath, + NUMANodeID: node, + Vendor: vendor, + Model: model, + SerialNumber: serialNo, + WWN: wwn, + WWNNoExtension: wwnNoExtension, + } + + parts := diskPartitions(ctx, paths, dname) + // Map this Disk object into the Partition... + for _, part := range parts { + part.Disk = d + } + d.Partitions = parts + + disks = append(disks, d) + } + + return disks +} + +// diskTypes returns the drive type, storage controller and bus type of a disk +func diskTypes(dname string) ( + DriveType, + StorageController, +) { + // The conditionals below which set the controller and drive type are + // based on information listed here: + // https://en.wikipedia.org/wiki/Device_file + driveType := DriveTypeUnknown + storageController := StorageControllerUnknown + if strings.HasPrefix(dname, "fd") { + driveType = DriveTypeFDD + } else if strings.HasPrefix(dname, "sd") { + driveType = DriveTypeHDD + storageController = StorageControllerSCSI + } else if strings.HasPrefix(dname, "hd") { + driveType = DriveTypeHDD + storageController = StorageControllerIDE + } else if strings.HasPrefix(dname, "vd") { + driveType = DriveTypeHDD + storageController = StorageControllerVirtIO + } else if strings.HasPrefix(dname, "nvme") { + driveType = DriveTypeSSD + storageController = StorageControllerNVMe + } else if strings.HasPrefix(dname, "sr") { + driveType = DriveTypeODD + storageController = StorageControllerSCSI + } else if strings.HasPrefix(dname, "xvd") { + driveType = DriveTypeHDD + storageController = StorageControllerSCSI + } else if strings.HasPrefix(dname, "mmc") { + driveType = DriveTypeSSD + storageController = StorageControllerMMC + } else if strings.HasPrefix(dname, "loop") { + driveType = DriveTypeVirtual + storageController = StorageControllerLoop + } + + return driveType, storageController +} + +func diskIsRotational(ctx *context.Context, paths *linuxpath.Paths, devName string) bool { + path := filepath.Join(paths.SysBlock, devName, "queue", "rotational") + contents := util.SafeIntFromFile(ctx, path) + return contents == 1 +} + +// partitionSizeBytes returns the size in bytes of the partition given a disk +// name and a partition name. Note: disk name and partition name do *not* +// contain any leading "/dev" parts. In other words, they are *names*, not +// paths. 
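For example (illustrative numbers): partitionSizeBytes(paths, "sda", "sda1") below reads /sys/block/sda/sda1/size, which holds a sector count, so a value of 204800 yields 204800 * 512 = 104857600 bytes, i.e. 100 MiB.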
+func partitionSizeBytes(paths *linuxpath.Paths, disk string, part string) uint64 { + path := filepath.Join(paths.SysBlock, disk, part, "size") + contents, err := os.ReadFile(path) + if err != nil { + return 0 + } + size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64) + if err != nil { + return 0 + } + return size * sectorSize +} + +// Given a full or short partition name, returns the mount point, the type of +// the partition and whether it's readonly +func partitionInfo(paths *linuxpath.Paths, part string) (string, string, bool) { + // Allow calling PartitionInfo with either the full partition name + // "/dev/sda1" or just "sda1" + if !strings.HasPrefix(part, "/dev") { + part = "/dev/" + part + } + + // mount entries for mounted partitions look like this: + // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 + var r io.ReadCloser + r, err := os.Open(paths.ProcMounts) + if err != nil { + return "", "", true + } + defer util.SafeClose(r) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + entry := parseMountEntry(line) + if entry == nil || entry.Partition != part { + continue + } + ro := true + for _, opt := range entry.Options { + if opt == "rw" { + ro = false + break + } + } + + return entry.Mountpoint, entry.FilesystemType, ro + } + return "", "", true +} + +type mountEntry struct { + Partition string + Mountpoint string + FilesystemType string + Options []string +} + +func parseMountEntry(line string) *mountEntry { + // mount entries for mounted partitions look like this: + // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0 + if line[0] != '/' { + return nil + } + fields := strings.Fields(line) + + if len(fields) < 4 { + return nil + } + + // We do some special parsing of the mountpoint, which may contain space, + // tab and newline characters, encoded into the mount entry line using their + // octal-to-string representations. From the GNU mtab man pages: + // + // "Therefore these characters are encoded in the files and the getmntent + // function takes care of the decoding while reading the entries back in. + // '\040' is used to encode a space character, '\011' to encode a tab + // character, '\012' to encode a newline character, and '\\' to encode a + // backslash." + mp := fields[1] + r := strings.NewReplacer( + "\\011", "\t", "\\012", "\n", "\\040", " ", "\\\\", "\\", + ) + mp = r.Replace(mp) + + res := &mountEntry{ + Partition: fields[0], + Mountpoint: mp, + FilesystemType: fields[2], + } + opts := strings.Split(fields[3], ",") + res.Options = opts + return res +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go new file mode 100644 index 00000000..f5b51645 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !darwin && !windows +// +build !linux,!darwin,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
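A quick sketch of parseMountEntry above on a representative /proc/mounts line, including the octal decoding just described:

	entry := parseMountEntry(`/dev/sda6 /mnt/my\040data ext4 rw,relatime 0 0`)
	// entry.Partition      == "/dev/sda6"
	// entry.Mountpoint     == "/mnt/my data" (the \040 is decoded to a space)
	// entry.FilesystemType == "ext4"
	// entry.Options        == []string{"rw", "relatime"}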
+// + +package block + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("blockFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go new file mode 100644 index 00000000..270e19f9 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go @@ -0,0 +1,282 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package block + +import ( + "strconv" + "strings" + + "github.com/StackExchange/wmi" + + "github.com/jaypipes/ghw/pkg/util" +) + +type physicalDiskMediaType int + +const ( + physicalDiskMediaTypeUnspecified physicalDiskMediaType = 0 + physicalDiskMediaTypeHDD physicalDiskMediaType = 3 + physicalDiskMediaTypeSSD physicalDiskMediaType = 4 + physicalDiskMediaTypeSCM physicalDiskMediaType = 5 +) + +func (dt physicalDiskMediaType) ToDriveType() DriveType { + switch dt { + case physicalDiskMediaTypeUnspecified: + return DriveTypeUnknown + case physicalDiskMediaTypeHDD: + return DriveTypeHDD + case physicalDiskMediaTypeSSD: + return DriveTypeSSD + case physicalDiskMediaTypeSCM: + return DriveTypeUnknown + } + return DriveTypeUnknown +} + +const wqlDiskDrive = "SELECT Caption, CreationClassName, DefaultBlockSize, Description, DeviceID, Index, InterfaceType, Manufacturer, MediaType, Model, Name, Partitions, SerialNumber, Size, TotalCylinders, TotalHeads, TotalSectors, TotalTracks, TracksPerCylinder FROM Win32_DiskDrive" + +type win32DiskDrive struct { + Caption *string + CreationClassName *string + DefaultBlockSize *uint64 + Description *string + DeviceID *string + Index *uint32 // Used to link with partition + InterfaceType *string + Manufacturer *string + MediaType *string + Model *string + Name *string + Partitions *int32 + SerialNumber *string + Size *uint64 + TotalCylinders *int64 + TotalHeads *int32 + TotalSectors *int64 + TotalTracks *int64 + TracksPerCylinder *int32 +} + +const wqlDiskPartition = "SELECT Access, BlockSize, Caption, CreationClassName, Description, DeviceID, DiskIndex, Index, Name, Size, SystemName, Type FROM Win32_DiskPartition" + +type win32DiskPartition struct { + Access *uint16 + BlockSize *uint64 + Caption *string + CreationClassName *string + Description *string + DeviceID *string + DiskIndex *uint32 // Used to link with Disk Drive + Index *uint32 + Name *string + Size *int64 + SystemName *string + Type *string +} + +const wqlLogicalDiskToPartition = "SELECT Antecedent, Dependent FROM Win32_LogicalDiskToPartition" + +type win32LogicalDiskToPartition struct { + Antecedent *string + Dependent *string +} + +const wqlLogicalDisk = "SELECT Caption, CreationClassName, Description, DeviceID, FileSystem, FreeSpace, Name, Size, SystemName FROM Win32_LogicalDisk" + +type win32LogicalDisk struct { + Caption *string + CreationClassName *string + Description *string + DeviceID *string + FileSystem *string + FreeSpace *uint64 + Name *string + Size *uint64 + SystemName *string +} + +const wqlPhysicalDisk = "SELECT DeviceId, MediaType FROM MSFT_PhysicalDisk" + +type win32PhysicalDisk struct { + DeviceId string + MediaType physicalDiskMediaType +} + +func (i *Info) load() error { + win32DiskDriveDescriptions, err := getDiskDrives() + if err != nil { + return err + } + + win32DiskPartitionDescriptions, err := getDiskPartitions() + if err != nil { + return err + } + + 
win32LogicalDiskToPartitionDescriptions, err := getLogicalDisksToPartitions() + if err != nil { + return err + } + + win32LogicalDiskDescriptions, err := getLogicalDisks() + if err != nil { + return err + } + + win32PhysicalDisks, err := getPhysicalDisks() + if err != nil { + return err + } + + // Converting into standard structures + disks := make([]*Disk, 0) + for _, diskdrive := range win32DiskDriveDescriptions { + var physicalDiskMediaType physicalDiskMediaType + for _, physicalDisk := range win32PhysicalDisks { + if id, err := strconv.Atoi(physicalDisk.DeviceId); err != nil { + return err + } else if uint32(id) == *diskdrive.Index { + physicalDiskMediaType = physicalDisk.MediaType + } + } + disk := &Disk{ + Name: strings.TrimSpace(*diskdrive.DeviceID), + SizeBytes: *diskdrive.Size, + PhysicalBlockSizeBytes: *diskdrive.DefaultBlockSize, + DriveType: toDriveType(physicalDiskMediaType, *diskdrive.MediaType, *diskdrive.Caption), + StorageController: toStorageController(*diskdrive.InterfaceType), + BusPath: util.UNKNOWN, // TODO: add information + NUMANodeID: -1, + Vendor: strings.TrimSpace(*diskdrive.Manufacturer), + Model: strings.TrimSpace(*diskdrive.Caption), + SerialNumber: strings.TrimSpace(*diskdrive.SerialNumber), + WWN: util.UNKNOWN, // TODO: add information + WWNNoExtension: util.UNKNOWN, // TODO: add information + Partitions: make([]*Partition, 0), + } + for _, diskpartition := range win32DiskPartitionDescriptions { + // Finding disk partition linked to current disk drive + if diskdrive.Index == nil || diskpartition.DiskIndex == nil { + continue + } + if *diskdrive.Index == *diskpartition.DiskIndex { + disk.PhysicalBlockSizeBytes = *diskpartition.BlockSize + // Finding logical partition linked to current disk partition + for _, logicaldisk := range win32LogicalDiskDescriptions { + for _, logicaldisktodiskpartition := range win32LogicalDiskToPartitionDescriptions { + var desiredAntecedent = "\\\\" + *diskpartition.SystemName + "\\root\\cimv2:" + *diskpartition.CreationClassName + ".DeviceID=\"" + *diskpartition.DeviceID + "\"" + var desiredDependent = "\\\\" + *logicaldisk.SystemName + "\\root\\cimv2:" + *logicaldisk.CreationClassName + ".DeviceID=\"" + *logicaldisk.DeviceID + "\"" + if *logicaldisktodiskpartition.Antecedent == desiredAntecedent && *logicaldisktodiskpartition.Dependent == desiredDependent { + // Appending Partition + p := &Partition{ + Name: strings.TrimSpace(*logicaldisk.Caption), + Label: strings.TrimSpace(*logicaldisk.Caption), + SizeBytes: *logicaldisk.Size, + MountPoint: *logicaldisk.DeviceID, + Type: *diskpartition.Type, + IsReadOnly: toReadOnly(*diskpartition.Access), + UUID: "", + } + disk.Partitions = append(disk.Partitions, p) + break + } + } + } + } + } + disks = append(disks, disk) + } + + i.Disks = disks + var tsb uint64 + for _, d := range i.Disks { + tsb += d.SizeBytes + } + i.TotalSizeBytes = tsb + i.TotalPhysicalBytes = tsb + return nil +} + +func getDiskDrives() ([]win32DiskDrive, error) { + // Getting disks drives data from WMI + var win3232DiskDriveDescriptions []win32DiskDrive + if err := wmi.Query(wqlDiskDrive, &win3232DiskDriveDescriptions); err != nil { + return nil, err + } + return win3232DiskDriveDescriptions, nil +} + +func getDiskPartitions() ([]win32DiskPartition, error) { + // Getting disk partitions from WMI + var win32DiskPartitionDescriptions []win32DiskPartition + if err := wmi.Query(wqlDiskPartition, &win32DiskPartitionDescriptions); err != nil { + return nil, err + } + return win32DiskPartitionDescriptions, nil +} + +func 
getLogicalDisksToPartitions() ([]win32LogicalDiskToPartition, error) { + // Getting links between logical disks and partitions from WMI + var win32LogicalDiskToPartitionDescriptions []win32LogicalDiskToPartition + if err := wmi.Query(wqlLogicalDiskToPartition, &win32LogicalDiskToPartitionDescriptions); err != nil { + return nil, err + } + return win32LogicalDiskToPartitionDescriptions, nil +} + +func getLogicalDisks() ([]win32LogicalDisk, error) { + // Getting logical disks from WMI + var win32LogicalDiskDescriptions []win32LogicalDisk + if err := wmi.Query(wqlLogicalDisk, &win32LogicalDiskDescriptions); err != nil { + return nil, err + } + return win32LogicalDiskDescriptions, nil +} + +func getPhysicalDisks() ([]win32PhysicalDisk, error) { + // Getting physical disks from WMI + var win32PhysicalDisks []win32PhysicalDisk + if err := wmi.QueryNamespace(wqlPhysicalDisk, &win32PhysicalDisks, "root\\Microsoft\\Windows\\Storage"); err != nil { + return nil, err + } + return win32PhysicalDisks, nil +} + +func toDriveType(physicalDiskMediaType physicalDiskMediaType, mediaType string, caption string) DriveType { + if driveType := physicalDiskMediaType.ToDriveType(); driveType != DriveTypeUnknown { + return driveType + } + + mediaType = strings.ToLower(mediaType) + caption = strings.ToLower(caption) + if strings.Contains(mediaType, "fixed") || strings.Contains(mediaType, "ssd") || strings.Contains(caption, "ssd") { + return DriveTypeSSD + } else if strings.Contains(mediaType, "hdd") { + return DriveTypeHDD + } + return DriveTypeUnknown +} + +// TODO: improve +func toStorageController(interfaceType string) StorageController { + var storageController StorageController + switch interfaceType { + case "SCSI": + storageController = StorageControllerSCSI + case "IDE": + storageController = StorageControllerIDE + default: + storageController = StorageControllerUnknown + } + return storageController +} + +// TODO: improve +func toReadOnly(access uint16) bool { + // See Access property from: https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-diskpartition + return access == 0x1 +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go new file mode 100644 index 00000000..a7667bbc --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go @@ -0,0 +1,117 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text.
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
new file mode 100644
index 00000000..a7667bbc
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
@@ -0,0 +1,117 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/marshal"
+	"github.com/jaypipes/ghw/pkg/option"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+var (
+	chassisTypeDescriptions = map[string]string{
+		"1":  "Other",
+		"2":  "Unknown",
+		"3":  "Desktop",
+		"4":  "Low profile desktop",
+		"5":  "Pizza box",
+		"6":  "Mini tower",
+		"7":  "Tower",
+		"8":  "Portable",
+		"9":  "Laptop",
+		"10": "Notebook",
+		"11": "Hand held",
+		"12": "Docking station",
+		"13": "All in one",
+		"14": "Sub notebook",
+		"15": "Space-saving",
+		"16": "Lunch box",
+		"17": "Main server chassis",
+		"18": "Expansion chassis",
+		"19": "SubChassis",
+		"20": "Bus Expansion chassis",
+		"21": "Peripheral chassis",
+		"22": "RAID chassis",
+		"23": "Rack mount chassis",
+		"24": "Sealed-case PC",
+		"25": "Multi-system chassis",
+		"26": "Compact PCI",
+		"27": "Advanced TCA",
+		"28": "Blade",
+		"29": "Blade enclosure",
+		"30": "Tablet",
+		"31": "Convertible",
+		"32": "Detachable",
+		"33": "IoT gateway",
+		"34": "Embedded PC",
+		"35": "Mini PC",
+		"36": "Stick PC",
+	}
+)
+
+// Info defines chassis release information
+type Info struct {
+	ctx             *context.Context
+	AssetTag        string `json:"asset_tag"`
+	SerialNumber    string `json:"serial_number"`
+	Type            string `json:"type"`
+	TypeDescription string `json:"type_description"`
+	Vendor          string `json:"vendor"`
+	Version         string `json:"version"`
+}
+
+func (i *Info) String() string {
+	vendorStr := ""
+	if i.Vendor != "" {
+		vendorStr = " vendor=" + i.Vendor
+	}
+	serialStr := ""
+	if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+		serialStr = " serial=" + i.SerialNumber
+	}
+	versionStr := ""
+	if i.Version != "" {
+		versionStr = " version=" + i.Version
+	}
+
+	return "chassis type=" + util.ConcatStrings(
+		i.TypeDescription,
+		vendorStr,
+		serialStr,
+		versionStr,
+	)
+}
+
+// New returns a pointer to an Info struct containing information
+// about the host's chassis
+func New(opts ...*option.Option) (*Info, error) {
+	ctx := context.New(opts...)
+	info := &Info{ctx: ctx}
+	if err := ctx.Do(info.load); err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+// simple private struct used to encapsulate chassis information in a top-level
+// "chassis" YAML/JSON map/object key
+type chassisPrinter struct {
+	Info *Info `json:"chassis"`
+}
+
+// YAMLString returns a string with the chassis information formatted as YAML
+// under a top-level "chassis:" key
+func (info *Info) YAMLString() string {
+	return marshal.SafeYAML(info.ctx, chassisPrinter{info})
+}
+
+// JSONString returns a string with the chassis information formatted as JSON
+// under a top-level "chassis:" key
+func (info *Info) JSONString(indent bool) string {
+	return marshal.SafeJSON(info.ctx, chassisPrinter{info}, indent)
+}
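(Aside, not part of the vendored code: the public surface of this package is just New plus the String/YAMLString/JSONString formatters defined above. A minimal consumer sketch, assuming the vendored module resolves normally:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/chassis"
)

func main() {
	info, err := chassis.New()
	if err != nil {
		panic(err)
	}
	fmt.Println(info)                  // e.g. "chassis type=Desktop vendor=..."
	fmt.Println(info.JSONString(true)) // indented JSON under a "chassis" key
}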
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
new file mode 100644
index 00000000..00f64de6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
@@ -0,0 +1,26 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+	"github.com/jaypipes/ghw/pkg/linuxdmi"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+func (i *Info) load() error {
+	i.AssetTag = linuxdmi.Item(i.ctx, "chassis_asset_tag")
+	i.SerialNumber = linuxdmi.Item(i.ctx, "chassis_serial")
+	i.Type = linuxdmi.Item(i.ctx, "chassis_type")
+	typeDesc, found := chassisTypeDescriptions[i.Type]
+	if !found {
+		typeDesc = util.UNKNOWN
+	}
+	i.TypeDescription = typeDesc
+	i.Vendor = linuxdmi.Item(i.ctx, "chassis_vendor")
+	i.Version = linuxdmi.Item(i.ctx, "chassis_version")
+
+	return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
new file mode 100644
index 00000000..0e3fd94b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+	"runtime"
+
+	"github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+	return errors.New("chassisFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
new file mode 100644
index 00000000..088cbed3
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
@@ -0,0 +1,43 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+	"github.com/StackExchange/wmi"
+
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlChassis = "SELECT Caption, Description, Name, Manufacturer, Model, SerialNumber, Tag, TypeDescriptions, Version FROM CIM_Chassis"
+
+type win32Chassis struct {
+	Caption          *string
+	Description      *string
+	Name             *string
+	Manufacturer     *string
+	Model            *string
+	SerialNumber     *string
+	Tag              *string
+	TypeDescriptions []string
+	Version          *string
+}
+
+func (i *Info) load() error {
+	// Getting data from WMI
+	var win32ChassisDescriptions []win32Chassis
+	if err := wmi.Query(wqlChassis, &win32ChassisDescriptions); err != nil {
+		return err
+	}
+	if len(win32ChassisDescriptions) > 0 {
+		i.AssetTag = *win32ChassisDescriptions[0].Tag
+		i.SerialNumber = *win32ChassisDescriptions[0].SerialNumber
+		i.Type = util.UNKNOWN // TODO:
+		i.TypeDescription = *win32ChassisDescriptions[0].Model
+		i.Vendor = *win32ChassisDescriptions[0].Manufacturer
+		i.Version = *win32ChassisDescriptions[0].Version
+	}
+	return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/context/context.go b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
new file mode 100644
index 00000000..fb8de528
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
@@ -0,0 +1,178 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package context
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw/pkg/option"
+	"github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// Context contains the merged set of configuration switches that act as an
+// execution context when calling internal discovery methods
+type Context struct {
+	Chroot               string
+	EnableTools          bool
+	SnapshotPath         string
+	SnapshotRoot         string
+	SnapshotExclusive    bool
+	PathOverrides        option.PathOverrides
+	snapshotUnpackedPath string
+	alert                option.Alerter
+	err                  error
+}
+
+// WithContext returns an option.Option that contains a pre-existing Context
+// struct. This is useful for some internal code that sets up snapshots.
+func WithContext(ctx *Context) *option.Option {
+	return &option.Option{
+		Context: ctx,
+	}
+}
+
+// Exists returns true if the supplied (merged) Option already contains
+// a context.
+//
+// TODO(jaypipes): We can get rid of this when we combine the option and
+// context packages, which will make it easier to detect the presence of a
+// pre-setup Context.
+func Exists(opt *option.Option) bool {
+	return opt != nil && opt.Context != nil
+}
+
+// New returns a Context struct pointer that has had various options set on it
+func New(opts ...*option.Option) *Context {
+	merged := option.Merge(opts...)
+	var ctx *Context
+	if merged.Context != nil {
+		var castOK bool
+		ctx, castOK = merged.Context.(*Context)
+		if !castOK {
+			panic("passed in a non-Context for the WithContext() function!")
+		}
+		return ctx
+	}
+	ctx = &Context{
+		alert:  option.EnvOrDefaultAlerter(),
+		Chroot: *merged.Chroot,
+	}
+
+	if merged.Snapshot != nil {
+		ctx.SnapshotPath = merged.Snapshot.Path
+		// root is optional, so an extra check is warranted
+		if merged.Snapshot.Root != nil {
+			ctx.SnapshotRoot = *merged.Snapshot.Root
+		}
+		ctx.SnapshotExclusive = merged.Snapshot.Exclusive
+	}
+
+	if merged.Alerter != nil {
+		ctx.alert = merged.Alerter
+	}
+
+	if merged.EnableTools != nil {
+		ctx.EnableTools = *merged.EnableTools
+	}
+
+	if merged.PathOverrides != nil {
+		ctx.PathOverrides = merged.PathOverrides
+	}
+
+	// New is not allowed to return an error - that would break the established
+	// API - so the only way out is to do the checks here, record the error,
+	// and return it later, at the earliest possible occasion, in Setup()
+	if ctx.SnapshotPath != "" && ctx.Chroot != option.DefaultChroot {
+		// The env/client code supplied a value, but we will overwrite it when
+		// unpacking snapshots!
+		ctx.err = fmt.Errorf("Conflicting options: chroot %q and snapshot path %q", ctx.Chroot, ctx.SnapshotPath)
+	}
+	return ctx
+}
+
+// FromEnv returns a Context that has been populated from the environs or
+// default options values
+func FromEnv() *Context {
+	chrootVal := option.EnvOrDefaultChroot()
+	enableTools := option.EnvOrDefaultTools()
+	snapPathVal := option.EnvOrDefaultSnapshotPath()
+	snapRootVal := option.EnvOrDefaultSnapshotRoot()
+	snapExclusiveVal := option.EnvOrDefaultSnapshotExclusive()
+	return &Context{
+		Chroot:            chrootVal,
+		EnableTools:       enableTools,
+		SnapshotPath:      snapPathVal,
+		SnapshotRoot:      snapRootVal,
+		SnapshotExclusive: snapExclusiveVal,
+	}
+}
+
+// Do wraps a Setup/Teardown pair around the given function
+func (ctx *Context) Do(fn func() error) error {
+	err := ctx.Setup()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		err := ctx.Teardown()
+		if err != nil {
+			ctx.Warn("teardown error: %v", err)
+		}
+	}()
+	return fn()
+}
+
+// Setup prepares the extra optional data a Context may use.
+// `Context`s are ready to use once returned by `New`.
+// Optional features, like snapshot unpacking, may require extra steps.
+// Run `Setup` to perform them. You should call `Setup` just once. It is safe
+// to call `Setup` even if you don't make use of the optional extra features -
+// `Setup` will do nothing.
+func (ctx *Context) Setup() error {
+	if ctx.err != nil {
+		return ctx.err
+	}
+	if ctx.SnapshotPath == "" {
+		// nothing to do!
+		return nil
+	}
+
+	var err error
+	root := ctx.SnapshotRoot
+	if root == "" {
+		root, err = snapshot.Unpack(ctx.SnapshotPath)
+		if err == nil {
+			ctx.snapshotUnpackedPath = root
+		}
+	} else {
+		var flags uint
+		if ctx.SnapshotExclusive {
+			flags |= snapshot.OwnTargetDirectory
+		}
+		_, err = snapshot.UnpackInto(ctx.SnapshotPath, root, flags)
+	}
+	if err != nil {
+		return err
+	}
+
+	ctx.Chroot = root
+	return nil
+}
+
+// Teardown releases any resource acquired by Setup.
+// You should always call `Teardown` if you called `Setup` to free any
+// resources acquired by `Setup`. Check `Do` for more automated management.
+func (ctx *Context) Teardown() error {
+	if ctx.snapshotUnpackedPath == "" {
+		// if the client code provided the unpack directory,
+		// then it is also in charge of the cleanup.
+		return nil
+	}
+	return snapshot.Cleanup(ctx.snapshotUnpackedPath)
+}
+
+func (ctx *Context) Warn(msg string, args ...interface{}) {
+	ctx.alert.Printf("WARNING: "+msg, args...)
+}
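(Aside, not part of the vendored code: the intended lifecycle is New followed by Do, with Do pairing Setup and Teardown around the callback. A minimal sketch, assuming option.Merge supplies defaults when no options are passed, as ghw's public constructors rely on:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/context"
)

func main() {
	ctx := context.New()
	// Do runs Setup, invokes the callback, then defers Teardown; snapshot
	// unpacking (if configured) happens inside Setup.
	err := ctx.Do(func() error {
		fmt.Println("effective chroot:", ctx.Chroot)
		return nil
	})
	if err != nil {
		fmt.Println("discovery failed:", err)
	}
}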
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
new file mode 100644
index 00000000..5c5e6bab
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
@@ -0,0 +1,193 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/marshal"
+	"github.com/jaypipes/ghw/pkg/option"
+)
+
+// ProcessorCore describes a physical host processor core. A processor core is
+// a separate processing unit within some types of central processing units
+// (CPU).
+type ProcessorCore struct {
+	// ID is the `uint32` identifier that the host gave this core. Note that
+	// this does *not* necessarily equate to a zero-based index of the core
+	// within a physical package. For example, the core IDs for an Intel Core
+	// i7 are 0, 1, 2, 8, 9, and 10
+	ID int `json:"id"`
+	// TotalHardwareThreads is the number of hardware threads associated with
+	// the core
+	TotalHardwareThreads uint32 `json:"total_hardware_threads"`
+	// NumThreads is the number of hardware threads associated with the core.
+	// DEPRECATED: Use `TotalHardwareThreads` instead.
+	NumThreads uint32 `json:"total_threads"`
+	// LogicalProcessors is a slice of ints representing the logical processor
+	// IDs assigned to any processing unit for the core. These are sometimes
+	// called the "thread siblings". Logical processor IDs are the *zero-based*
+	// index of the processor on the host and are *not* related to the core ID.
+	LogicalProcessors []int `json:"logical_processors"`
+}
+
+// String returns a short string indicating important information about the
+// processor core
+func (c *ProcessorCore) String() string {
+	return fmt.Sprintf(
+		"processor core #%d (%d threads), logical processors %v",
+		c.ID,
+		c.TotalHardwareThreads,
+		c.LogicalProcessors,
+	)
+}
+
+// Processor describes a physical host central processing unit (CPU).
+type Processor struct {
+	// ID is the physical processor `uint32` ID according to the system
+	ID int `json:"id"`
+	// TotalCores is the number of physical cores in the processor package
+	TotalCores uint32 `json:"total_cores"`
+	// NumCores is the number of physical cores in the processor package.
+	// DEPRECATED: Use `TotalCores` instead.
+	NumCores uint32 `json:"-"`
+	// TotalHardwareThreads is the number of hardware threads associated with
+	// the processor package
+	TotalHardwareThreads uint32 `json:"total_hardware_threads"`
+	// NumThreads is the number of hardware threads in the processor package.
+	// DEPRECATED: Use `TotalHardwareThreads` instead.
+	NumThreads uint32 `json:"total_threads"`
+	// Vendor is a string containing the vendor name
+	Vendor string `json:"vendor"`
+	// Model is a string containing the vendor's model name
+	Model string `json:"model"`
+	// Capabilities is a slice of strings indicating the features the processor
+	// has enabled
+	Capabilities []string `json:"capabilities"`
+	// Cores is a slice of ProcessorCore struct pointers that are packed onto
+	// this physical processor
+	Cores []*ProcessorCore `json:"cores"`
+}
+
+// CoreByID returns the ProcessorCore having the supplied ID.
+func (p *Processor) CoreByID(coreID int) *ProcessorCore {
+	for _, core := range p.Cores {
+		if core.ID == coreID {
+			return core
+		}
+	}
+	return nil
+}
+
+// HasCapability returns true if the Processor has the supplied cpuid
+// capability, false otherwise. Examples of cpuid capabilities would be 'vmx'
+// or 'sse4_2'. To see a list of potential cpuid capabilities, see the section
+// on CPUID feature bits in the following article:
+//
+// https://en.wikipedia.org/wiki/CPUID
+func (p *Processor) HasCapability(find string) bool {
+	for _, c := range p.Capabilities {
+		if c == find {
+			return true
+		}
+	}
+	return false
+}
+
+// String returns a short string describing the Processor
+func (p *Processor) String() string {
+	ncs := "cores"
+	if p.TotalCores == 1 {
+		ncs = "core"
+	}
+	nts := "threads"
+	if p.TotalHardwareThreads == 1 {
+		nts = "thread"
+	}
+	return fmt.Sprintf(
+		"physical package #%d (%d %s, %d hardware %s)",
+		p.ID,
+		p.TotalCores,
+		ncs,
+		p.TotalHardwareThreads,
+		nts,
+	)
+}
+
+// Info describes all central processing unit (CPU) functionality on a host.
+// Returned by the `ghw.CPU()` function.
+type Info struct {
+	ctx *context.Context
+	// TotalCores is the total number of physical cores the host system
+	// contains
+	TotalCores uint32 `json:"total_cores"`
+	// TotalHardwareThreads is the total number of hardware threads the host
+	// system contains
+	TotalHardwareThreads uint32 `json:"total_hardware_threads"`
+	// TotalThreads is the total number of hardware threads the host system
+	// contains.
+	// DEPRECATED: Use `TotalHardwareThreads` instead
+	TotalThreads uint32 `json:"total_threads"`
+	// Processors is a slice of Processor struct pointers, one for each
+	// physical processor package contained in the host
+	Processors []*Processor `json:"processors"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// CPUs on the host system
+func New(opts ...*option.Option) (*Info, error) {
+	ctx := context.New(opts...)
+	info := &Info{ctx: ctx}
+	if err := ctx.Do(info.load); err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+// String returns a short string indicating a summary of CPU information
+func (i *Info) String() string {
+	nps := "packages"
+	if len(i.Processors) == 1 {
+		nps = "package"
+	}
+	ncs := "cores"
+	if i.TotalCores == 1 {
+		ncs = "core"
+	}
+	nts := "threads"
+	if i.TotalThreads == 1 {
+		nts = "thread"
+	}
+	return fmt.Sprintf(
+		"cpu (%d physical %s, %d %s, %d hardware %s)",
+		len(i.Processors),
+		nps,
+		i.TotalCores,
+		ncs,
+		i.TotalThreads,
+		nts,
+	)
+}
+
+// simple private struct used to encapsulate cpu information in a top-level
+// "cpu" YAML/JSON map/object key
+type cpuPrinter struct {
+	Info *Info `json:"cpu"`
+}
+
+// YAMLString returns a string with the cpu information formatted as YAML
+// under a top-level "cpu:" key
+func (i *Info) YAMLString() string {
+	return marshal.SafeYAML(i.ctx, cpuPrinter{i})
+}
+
+// JSONString returns a string with the cpu information formatted as JSON
+// under a top-level "cpu:" key
+func (i *Info) JSONString(indent bool) string {
+	return marshal.SafeJSON(i.ctx, cpuPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_darwin.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_darwin.go
new file mode 100644
index 00000000..e4353e47
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_darwin.go
@@ -0,0 +1,138 @@
+package cpu
+
+import (
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	hasARMArchitecture = false                   // determine if ARM
+	sysctlOutput       = make(map[string]string) // store all the sysctl output
+)
+
+func (i *Info) load() error {
+	err := populateSysctlOutput()
+	if err != nil {
+		return errors.Wrap(err, "unable to populate sysctl map")
+	}
+
+	i.TotalCores = getTotalCores()
+	i.TotalThreads = getTotalThreads()
+	i.Processors = getProcessors()
+
+	return nil
+}
+
+// getProcessors; more info at https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_system_capabilities
+func getProcessors() []*Processor {
+	p := make([]*Processor, getProcTopCount())
+	for i := range p {
+		p[i] = new(Processor)
+		p[i].Vendor = sysctlOutput[fmt.Sprintf("hw.perflevel%s.name", strconv.Itoa(i))]
+		p[i].Model = getVendor()
+		p[i].NumCores = getNumberCoresFromPerfLevel(i)
+		p[i].Capabilities = getCapabilities()
+		p[i].Cores = make([]*ProcessorCore, getTotalCores())
+	}
+	return p
+}
+
+// getCapabilities is valid for ARM; see https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
+func getCapabilities() []string {
+	var caps []string
+
+	// add ARM capabilities
+	if hasARMArchitecture {
+		for cap, isEnabled := range sysctlOutput {
+			if isEnabled == "1" {
+				// capabilities with keys with a common prefix
+				commonPrefix := "hw.optional.arm."
+				if strings.HasPrefix(cap, commonPrefix) {
+					caps = append(caps, strings.TrimPrefix(cap, commonPrefix))
+				}
+				// not following the prefix convention but still important
+				if cap == "hw.optional.AdvSIMD_HPFPCvt" {
+					caps = append(caps, "AdvSIMD_HPFPCvt")
+				}
+				if cap == "hw.optional.armv8_crc32" {
+					caps = append(caps, "armv8_crc32")
+				}
+			}
+		}
+
+		// hw.optional.AdvSIMD and hw.optional.floatingpoint are always enabled (see linked doc)
+		caps = append(caps, "AdvSIMD")
+		caps = append(caps, "floatingpoint")
+	}
+
+	return caps
+}
+
+// populateSysctlOutput populates a map so values can be retrieved quickly later
+func populateSysctlOutput() error {
+	// get sysctl output
+	o, err := exec.Command("sysctl", "-a").CombinedOutput()
+	if err != nil {
+		return err
+	}
+
+	// clean up and store sysctl output
+	oS := strings.Split(string(o), "\n")
+	for _, l := range oS {
+		if l != "" {
+			s := strings.SplitN(l, ":", 2)
+			if len(s) < 2 {
+				continue
+			}
+			k, v := strings.TrimSpace(s[0]), strings.TrimSpace(s[1])
+			sysctlOutput[k] = v
+
+			// see if it's possible to determine if ARM
+			if k == "hw.optional.arm64" && v == "1" {
+				hasARMArchitecture = true
+			}
+		}
+	}
+
+	return nil
+}
+
+func getNumberCoresFromPerfLevel(i int) uint32 {
+	key := fmt.Sprintf("hw.perflevel%s.physicalcpu_max", strconv.Itoa(i))
+	nCores := sysctlOutput[key]
+	return stringToUint32(nCores)
+}
+
+func getVendor() string {
+	v := sysctlOutput["machdep.cpu.brand_string"]
+	return v
+}
+
+func getProcTopCount() int {
+	pC, ok := sysctlOutput["hw.nperflevels"]
+	if !ok {
+		// most likely Intel, so no performance/efficiency core separation
+		return 1
+	}
+	i, _ := strconv.Atoi(pC)
+	return i
+}
+
+// num of physical cores
+func getTotalCores() uint32 {
+	nCores := sysctlOutput["hw.physicalcpu_max"]
+	return stringToUint32(nCores)
+}
+
+func getTotalThreads() uint32 {
+	nThreads := sysctlOutput["machdep.cpu.thread_count"]
+	return stringToUint32(nThreads)
+}
+
+func stringToUint32(s string) uint32 {
+	o, _ := strconv.ParseUint(s, 10, 0)
+	return uint32(o)
+}
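(Aside, not part of the vendored code: whichever platform-specific load implementation runs, consumers only see the cpu.New/Info API from cpu.go above. A minimal sketch; note that some loaders, like the darwin one, may leave placeholder entries in Cores:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/cpu"
)

func main() {
	info, err := cpu.New()
	if err != nil {
		panic(err)
	}
	fmt.Println(info) // e.g. "cpu (1 physical package, 8 cores, 16 hardware threads)"
	for _, proc := range info.Processors {
		fmt.Println(" ", proc)
		for _, core := range proc.Cores {
			fmt.Println("   ", core)
		}
	}
}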
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
new file mode 100644
index 00000000..f403d29d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
@@ -0,0 +1,384 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/linuxpath"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+var (
+	regexForCpulCore = regexp.MustCompile("^cpu([0-9]+)$")
+	onlineFile       = "online"
+)
+
+func (i *Info) load() error {
+	i.Processors = processorsGet(i.ctx)
+	var totCores uint32
+	var totThreads uint32
+	for _, p := range i.Processors {
+		totCores += p.TotalCores
+		totThreads += p.TotalHardwareThreads
+	}
+	i.TotalCores = totCores
+	i.TotalHardwareThreads = totThreads
+	// TODO(jaypipes): Remove TotalThreads before v1.0
+	i.TotalThreads = totThreads
+	return nil
+}
+
+func processorsGet(ctx *context.Context) []*Processor {
+	paths := linuxpath.New(ctx)
+
+	lps := logicalProcessorsFromProcCPUInfo(ctx)
+	// keyed by processor ID (physical_package_id)
+	procs := map[int]*Processor{}
+
+	// The /sys/devices/system/cpu pseudodir contains N number of pseudodirs
+	// with information about the logical processors on the host. These
+	// logical processor pseudodirs are of the pattern
+	// /sys/devices/system/cpu/cpu{N}
+	fnames, err := os.ReadDir(paths.SysDevicesSystemCPU)
+	if err != nil {
+		ctx.Warn("failed to read /sys/devices/system/cpu: %s", err)
+		return []*Processor{}
+	}
+	for _, fname := range fnames {
+		matches := regexForCpulCore.FindStringSubmatch(fname.Name())
+		if len(matches) < 2 {
+			continue
+		}
+
+		lpID, err := strconv.Atoi(matches[1])
+		if err != nil {
+			ctx.Warn("failed to find numeric logical processor ID: %s", err)
+			continue
+		}
+
+		onlineFilePath := filepath.Join(paths.SysDevicesSystemCPU, fmt.Sprintf("cpu%d", lpID), onlineFile)
+		if _, err := os.Stat(onlineFilePath); err == nil {
+			if util.SafeIntFromFile(ctx, onlineFilePath) == 0 {
+				continue
+			}
+		} else if errors.Is(err, os.ErrNotExist) {
+			// Assume the CPU is online if the online state file doesn't exist
+			// (as is the case with older snapshots)
+		}
+		procID := processorIDFromLogicalProcessorID(ctx, lpID)
+		proc, found := procs[procID]
+		if !found {
+			proc = &Processor{ID: procID}
+			lp, ok := lps[lpID]
+			if !ok {
+				ctx.Warn(
+					"failed to find attributes for logical processor %d",
+					lpID,
+				)
+				continue
+			}
+
+			// Assumes /proc/cpuinfo is in order of logical processor id, so
+			// that lps[lpID] describes logical processor `lpID`.
+			// Once we have a more robust way of fetching the following info,
+			// we can drop /proc/cpuinfo.
+			if len(lp.Attrs["flags"]) != 0 { // x86
+				proc.Capabilities = strings.Split(lp.Attrs["flags"], " ")
+			} else if len(lp.Attrs["Features"]) != 0 { // ARM64
+				proc.Capabilities = strings.Split(lp.Attrs["Features"], " ")
+			}
+			if len(lp.Attrs["model name"]) != 0 {
+				proc.Model = lp.Attrs["model name"]
+			} else if len(lp.Attrs["Processor"]) != 0 { // ARM
+				proc.Model = lp.Attrs["Processor"]
+			} else if len(lp.Attrs["cpu model"]) != 0 { // MIPS, ARM
+				proc.Model = lp.Attrs["cpu model"]
+			} else if len(lp.Attrs["Model Name"]) != 0 { // LoongArch
+				proc.Model = lp.Attrs["Model Name"]
+			} else if len(lp.Attrs["uarch"]) != 0 { // SiFive
+				proc.Model = lp.Attrs["uarch"]
+			}
+			if len(lp.Attrs["vendor_id"]) != 0 {
+				proc.Vendor = lp.Attrs["vendor_id"]
+			} else if len(lp.Attrs["isa"]) != 0 { // RISCV64
+				proc.Vendor = lp.Attrs["isa"]
+			} else if lp.Attrs["CPU implementer"] == "0x41" { // ARM
+				proc.Vendor = "ARM"
+			}
+			procs[procID] = proc
+		}
+
+		coreID := coreIDFromLogicalProcessorID(ctx, lpID)
+		core := proc.CoreByID(coreID)
+		if core == nil {
+			core = &ProcessorCore{
+				ID:                   coreID,
+				TotalHardwareThreads: 1,
+				// TODO(jaypipes): Remove NumThreads before v1.0
+				NumThreads: 1,
+			}
+			proc.Cores = append(proc.Cores, core)
+			proc.TotalCores += 1
+			// TODO(jaypipes): Remove NumCores before v1.0
+			proc.NumCores += 1
+		} else {
+			core.TotalHardwareThreads += 1
+			// TODO(jaypipes): Remove NumThreads before v1.0
+			core.NumThreads += 1
+		}
+		proc.TotalHardwareThreads += 1
+		// TODO(jaypipes): Remove NumThreads before v1.0
+		proc.NumThreads += 1
+		core.LogicalProcessors = append(core.LogicalProcessors, lpID)
+	}
+	res := []*Processor{}
+	for _, p := range procs {
+		for _, c := range p.Cores {
+			sort.Ints(c.LogicalProcessors)
+		}
+		res = append(res, p)
+	}
+	return res
+}
+
+// processorIDFromLogicalProcessorID returns the processor physical package ID
+// for the supplied logical processor ID
+func processorIDFromLogicalProcessorID(ctx *context.Context, lpID int) int {
+	paths := linuxpath.New(ctx)
+	// Fetch CPU ID
+	path := filepath.Join(
+		paths.SysDevicesSystemCPU,
+		fmt.Sprintf("cpu%d", lpID),
+		"topology", "physical_package_id",
+	)
+	return util.SafeIntFromFile(ctx, path)
+}
+
+// coreIDFromLogicalProcessorID returns the core ID for the supplied logical
+// processor ID
+func coreIDFromLogicalProcessorID(ctx *context.Context, lpID int) int {
+	paths := linuxpath.New(ctx)
+	// Fetch CPU ID
+	path := filepath.Join(
+		paths.SysDevicesSystemCPU,
+		fmt.Sprintf("cpu%d", lpID),
+		"topology", "core_id",
+	)
+	return util.SafeIntFromFile(ctx, path)
+}
+
+func CoresForNode(ctx *context.Context, nodeID int) ([]*ProcessorCore, error) {
+	// The /sys/devices/system/node/nodeX directory contains a subdirectory
+	// called 'cpuX' for each logical processor assigned to the node. Each of
+	// those subdirectories contains a topology subdirectory which has a
+	// core_id file that indicates the 0-based identifier of the physical core
+	// the logical processor (hardware thread) is on.
+	paths := linuxpath.New(ctx)
+	path := filepath.Join(
+		paths.SysDevicesSystemNode,
+		fmt.Sprintf("node%d", nodeID),
+	)
+	cores := make([]*ProcessorCore, 0)
+
+	findCoreByID := func(coreID int) *ProcessorCore {
+		for _, c := range cores {
+			if c.ID == coreID {
+				return c
+			}
+		}
+
+		c := &ProcessorCore{
+			ID:                coreID,
+			LogicalProcessors: []int{},
+		}
+		cores = append(cores, c)
+		return c
+	}
+
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+	for _, file := range files {
+		filename := file.Name()
+		if !strings.HasPrefix(filename, "cpu") {
+			continue
+		}
+		if filename == "cpumap" || filename == "cpulist" {
+			// There are two files in the node directory that start with 'cpu'
+			// but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+			// these files.
+			continue
+		}
+		// Grab the logical processor ID by cutting the integer from the
+		// /sys/devices/system/node/nodeX/cpuX filename
+		cpuPath := filepath.Join(path, filename)
+		procID, err := strconv.Atoi(filename[3:])
+		if err != nil {
+			ctx.Warn(
+				"failed to determine procID from %s. Expected integer after 3rd char.",
+				filename,
+			)
+			continue
+		}
+		onlineFilePath := filepath.Join(cpuPath, onlineFile)
+		if _, err := os.Stat(onlineFilePath); err == nil {
+			if util.SafeIntFromFile(ctx, onlineFilePath) == 0 {
+				continue
+			}
+		} else if errors.Is(err, os.ErrNotExist) {
+			// Assume the CPU is online if the online state file doesn't exist
+			// (as is the case with older snapshots)
+		}
+		coreIDPath := filepath.Join(cpuPath, "topology", "core_id")
+		coreID := util.SafeIntFromFile(ctx, coreIDPath)
+		core := findCoreByID(coreID)
+		core.LogicalProcessors = append(
+			core.LogicalProcessors,
+			procID,
+		)
+	}
+
+	for _, c := range cores {
+		c.TotalHardwareThreads = uint32(len(c.LogicalProcessors))
+		// TODO(jaypipes): Remove NumThreads before v1.0
+		c.NumThreads = c.TotalHardwareThreads
+	}
+
+	return cores, nil
+}
+
+// logicalProcessor contains information about a single logical processor
+// on the host.
+type logicalProcessor struct {
+	// This is the logical processor ID assigned by the host. In /proc/cpuinfo,
+	// this is the zero-based index of the logical processor as it appears in
+	// the /proc/cpuinfo file and matches the "processor" attribute. In
+	// /sys/devices/system/cpu/cpu{N} pseudodir entries, this is the N value.
+	ID int
+	// The entire collection of string attribute name/value pairs for the
+	// logical processor.
+	Attrs map[string]string
+}
+
+// logicalProcessorsFromProcCPUInfo reads the `/proc/cpuinfo` pseudofile and
+// returns a map, keyed by logical processor ID, of logical processor structs.
+//
+// `/proc/cpuinfo` files look like the following:
+//
+// ```
+// processor : 0
+// vendor_id : AuthenticAMD
+// cpu family : 23
+// model : 8
+// model name : AMD Ryzen 7 2700X Eight-Core Processor
+// stepping : 2
+// microcode : 0x800820d
+// cpu MHz : 2200.000
+// cache size : 512 KB
+// physical id : 0
+// siblings : 16
+// core id : 0
+// cpu cores : 8
+// apicid : 0
+// initial apicid : 0
+// fpu : yes
+// fpu_exception : yes
+// cpuid level : 13
+// wp : yes
+// flags : fpu vme de pse tsc msr pae mce
+// bugs : sysret_ss_attrs null_seg spectre_v1 spectre_v2 spec_store_bypass retbleed smt_rsb
+// bogomips : 7386.41
+// TLB size : 2560 4K pages
+// clflush size : 64
+// cache_alignment : 64
+// address sizes : 43 bits physical, 48 bits virtual
+// power management: ts ttp tm hwpstate cpb eff_freq_ro [13] [14]
+//
+// processor : 1
+// vendor_id : AuthenticAMD
+// cpu family : 23
+// model : 8
+// model name : AMD Ryzen 7 2700X Eight-Core Processor
+// stepping : 2
+// microcode : 0x800820d
+// cpu MHz : 1885.364
+// cache size : 512 KB
+// physical id : 0
+// siblings : 16
+// core id : 1
+// cpu cores : 8
+// apicid : 2
+// initial apicid : 2
+// fpu : yes
+// fpu_exception : yes
+// cpuid level : 13
+// wp : yes
+// flags : fpu vme de pse tsc msr pae mce
+// bugs : sysret_ss_attrs null_seg spectre_v1 spectre_v2 spec_store_bypass retbleed smt_rsb
+// bogomips : 7386.41
+// TLB size : 2560 4K pages
+// clflush size : 64
+// cache_alignment : 64
+// address sizes : 43 bits physical, 48 bits virtual
+// power management: ts ttp tm hwpstate cpb eff_freq_ro [13] [14]
+// ```
+//
+// with blank line-separated blocks of colon-delimited attribute name/value
+// pairs for a specific logical processor on the host.
+func logicalProcessorsFromProcCPUInfo(
+	ctx *context.Context,
+) map[int]*logicalProcessor {
+	paths := linuxpath.New(ctx)
+	r, err := os.Open(paths.ProcCpuinfo)
+	if err != nil {
+		return nil
+	}
+	defer util.SafeClose(r)
+
+	lps := map[int]*logicalProcessor{}
+
+	// A map of attributes describing the logical processor
+	lpAttrs := map[string]string{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if line == "" {
+			// Output of /proc/cpuinfo has a blank newline to separate logical
+			// processors, so here we collect up all the attributes we've
+			// gathered for this logical processor block
+			lpIDstr, ok := lpAttrs["processor"]
+			if !ok {
+				ctx.Warn("expected to find 'processor' key in /proc/cpuinfo attributes")
+				continue
+			}
+			lpID, _ := strconv.Atoi(lpIDstr)
+			lp := &logicalProcessor{
+				ID:    lpID,
+				Attrs: lpAttrs,
+			}
+			lps[lpID] = lp
+			// Reset the current set of processor attributes...
+			lpAttrs = map[string]string{}
+			continue
+		}
+		parts := strings.Split(line, ":")
+		key := strings.TrimSpace(parts[0])
+		value := strings.TrimSpace(parts[1])
+		lpAttrs[key] = value
+	}
+	return lps
+}
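(Aside, not part of the vendored code: the blank-line framing described in the comment above is easy to see in isolation. This toy, self-contained parser applies the same rule to a canned two-processor sample:)

package main

import (
	"bufio"
	"fmt"
	"strings"
)

const sample = `processor : 0
core id : 0

processor : 1
core id : 1

`

func main() {
	// Toy version of the blank-line framing used above: collect "key : value"
	// pairs until an empty line closes one logical-processor block.
	attrs := map[string]string{}
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			fmt.Println("block:", attrs)
			attrs = map[string]string{}
			continue
		}
		parts := strings.SplitN(line, ":", 2)
		attrs[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
	}
}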
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
new file mode 100644
index 00000000..85156069
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows && !darwin
+// +build !linux,!windows,!darwin
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+	"runtime"
+
+	"github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+	return errors.New("cpu.Info.load not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
new file mode 100644
index 00000000..bd4b8469
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
@@ -0,0 +1,63 @@
+//go:build !linux
+// +build !linux
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+	"github.com/StackExchange/wmi"
+)
+
+const wmqlProcessor = "SELECT Manufacturer, Name, NumberOfLogicalProcessors, NumberOfCores FROM Win32_Processor"
+
+type win32Processor struct {
+	Manufacturer              *string
+	Name                      *string
+	NumberOfLogicalProcessors uint32
+	NumberOfCores             uint32
+}
+
+func (i *Info) load() error {
+	// Getting info from WMI
+	var win32descriptions []win32Processor
+	if err := wmi.Query(wmqlProcessor, &win32descriptions); err != nil {
+		return err
+	}
+	// Converting into standard structures
+	i.Processors = processorsGet(win32descriptions)
+	var totCores uint32
+	var totThreads uint32
+	for _, p := range i.Processors {
+		totCores += p.TotalCores
+		totThreads += p.TotalHardwareThreads
+	}
+	i.TotalCores = totCores
+	i.TotalHardwareThreads = totThreads
+	// TODO(jaypipes): Remove TotalThreads by v1.0
+	i.TotalThreads = totThreads
+	return nil
+}
+
+func processorsGet(win32descriptions []win32Processor) []*Processor {
+	var procs []*Processor
+	// Converting into standard structures
+	for index, description := range win32descriptions {
+		p := &Processor{
+			ID:         index,
+			Model:      *description.Name,
+			Vendor:     *description.Manufacturer,
+			TotalCores: description.NumberOfCores,
+			// TODO(jaypipes): Remove NumCores before v1.0
+			NumCores:             description.NumberOfCores,
+			TotalHardwareThreads: description.NumberOfLogicalProcessors,
+			// TODO(jaypipes): Remove NumThreads before v1.0
+			NumThreads: description.NumberOfLogicalProcessors,
+		}
+		procs = append(procs, p)
+	}
+	return procs
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
new file mode 100644
index 00000000..65864c7e
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
@@ -0,0 +1,95 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/marshal"
+	"github.com/jaypipes/ghw/pkg/option"
+	"github.com/jaypipes/ghw/pkg/pci"
+	"github.com/jaypipes/ghw/pkg/topology"
+)
+
+type GraphicsCard struct {
+	// the PCI address where the graphics card can be found
+	Address string `json:"address"`
+	// The "index" of the card on the bus (generally not useful information,
+	// but might as well include it)
+	Index int `json:"index"`
+	// pointer to a PCIDevice struct that describes the vendor and product
+	// model, etc
+	// TODO(jaypipes): Rename this field to PCI, instead of DeviceInfo
+	DeviceInfo *pci.Device `json:"pci"`
+	// Topology node that the graphics card is affined to. Will be nil if the
+	// architecture is not NUMA.
+	Node *topology.Node `json:"node,omitempty"`
+}
+
+func (card *GraphicsCard) String() string {
+	deviceStr := card.Address
+	if card.DeviceInfo != nil {
+		deviceStr = card.DeviceInfo.String()
+	}
+	nodeStr := ""
+	if card.Node != nil {
+		nodeStr = fmt.Sprintf(" [affined to NUMA node %d]", card.Node.ID)
+	}
+	return fmt.Sprintf(
+		"card #%d %s@%s",
+		card.Index,
+		nodeStr,
+		deviceStr,
+	)
+}
+
+type Info struct {
+	ctx           *context.Context
+	GraphicsCards []*GraphicsCard `json:"cards"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// graphics cards on the host system
+func New(opts ...*option.Option) (*Info, error) {
+	ctx := context.New(opts...)
+	info := &Info{ctx: ctx}
+	if err := ctx.Do(info.load); err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+func (i *Info) String() string {
+	numCardsStr := "cards"
+	if len(i.GraphicsCards) == 1 {
+		numCardsStr = "card"
+	}
+	return fmt.Sprintf(
+		"gpu (%d graphics %s)",
+		len(i.GraphicsCards),
+		numCardsStr,
+	)
+}
+
+// simple private struct used to encapsulate gpu information in a top-level
+// "gpu" YAML/JSON map/object key
+type gpuPrinter struct {
+	Info *Info `json:"gpu"`
+}
+
+// YAMLString returns a string with the gpu information formatted as YAML
+// under a top-level "gpu:" key
+func (i *Info) YAMLString() string {
+	return marshal.SafeYAML(i.ctx, gpuPrinter{i})
+}
+
+// JSONString returns a string with the gpu information formatted as JSON
+// under a top-level "gpu:" key
+func (i *Info) JSONString(indent bool) string {
+	return marshal.SafeJSON(i.ctx, gpuPrinter{i}, indent)
+}
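(Aside, not part of the vendored code: a minimal consumer sketch of this package's public API, using only the New and String methods defined above:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/gpu"
)

func main() {
	info, err := gpu.New()
	if err != nil {
		panic(err)
	}
	fmt.Println(info) // e.g. "gpu (1 graphics card)"
	for _, card := range info.GraphicsCards {
		fmt.Println(" ", card)
	}
}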
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
new file mode 100644
index 00000000..e5e341c1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
@@ -0,0 +1,170 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/linuxpath"
+	"github.com/jaypipes/ghw/pkg/pci"
+	"github.com/jaypipes/ghw/pkg/topology"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+	validPCIAddress = `\b(0{0,4}:\d{2}:\d{2}.\d:?\w*)`
+)
+
+var reValidPCIAddress = regexp.MustCompile(validPCIAddress)
+
+const (
+	_WARN_NO_SYS_CLASS_DRM = `
+/sys/class/drm does not exist on this system (likely the host system is a
+virtual machine or container with no graphics). Therefore,
+GPUInfo.GraphicsCards will be an empty array.
+`
+)
+
+func (i *Info) load() error {
+	// In Linux, each graphics card is listed under the /sys/class/drm
+	// directory as a symbolic link named "cardN", where N is a zero-based
+	// index of the card in the system. "DRM" stands for Direct Rendering
+	// Manager and is the Linux subsystem that is responsible for graphics I/O
+	//
+	// Each card may have multiple symbolic links in this directory
+	// representing the interfaces from the graphics card over a particular
+	// wire protocol (HDMI, DisplayPort, etc). These symbolic links are named
+	// cardN-<interface type>-<interface number>. For instance, on one of my
+	// local workstations with an NVIDIA GTX 1050ti graphics card with one
+	// HDMI, one DisplayPort, and one DVI interface to the card, I see the
+	// following in /sys/class/drm:
+	//
+	// $ ll /sys/class/drm/
+	// total 0
+	// drwxr-xr-x  2 root root 0 Jul 16 11:50 ./
+	// drwxr-xr-x 75 root root 0 Jul 16 11:50 ../
+	// lrwxrwxrwx  1 root root 0 Jul 16 11:50 card0 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/
+	// lrwxrwxrwx  1 root root 0 Jul 16 11:50 card0-DP-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DP-1/
+	// lrwxrwxrwx  1 root root 0 Jul 16 11:50 card0-DVI-D-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DVI-D-1/
+	// lrwxrwxrwx  1 root root 0 Jul 16 11:50 card0-HDMI-A-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-HDMI-A-1/
+	//
+	// In this routine, we are only interested in the first link (card0), which
+	// we follow to gather information about the actual device from the PCI
+	// subsystem (we query the modalias file of the PCI device's sysfs
+	// directory using the `ghw.PCIInfo.GetDevice()` function).
+	paths := linuxpath.New(i.ctx)
+	links, err := os.ReadDir(paths.SysClassDRM)
+	if err != nil {
+		i.ctx.Warn(_WARN_NO_SYS_CLASS_DRM)
+		return nil
+	}
+	cards := make([]*GraphicsCard, 0)
+	for _, link := range links {
+		lname := link.Name()
+		if !strings.HasPrefix(lname, "card") {
+			continue
+		}
+		if strings.ContainsRune(lname, '-') {
+			continue
+		}
+		// Grab the card's zero-based integer index
+		lnameBytes := []byte(lname)
+		cardIdx, err := strconv.Atoi(string(lnameBytes[4:]))
+		if err != nil {
+			cardIdx = -1
+		}
+
+		// Calculate the card's PCI address by looking at the symbolic link's
+		// target
+		lpath := filepath.Join(paths.SysClassDRM, lname)
+		dest, err := os.Readlink(lpath)
+		if err != nil {
+			continue
+		}
+		pathParts := strings.Split(dest, "/")
+		// The PCI address of the graphics card is the *last* PCI address in
+		// the filepath...
+		pciAddress := ""
+		for x := len(pathParts) - 1; x >= 0; x-- {
+			part := pathParts[x]
+			if reValidPCIAddress.MatchString(part) {
+				pciAddress = part
+				break
+			}
+		}
+		if pciAddress == "" {
+			continue
+		}
+		card := &GraphicsCard{
+			Address: pciAddress,
+			Index:   cardIdx,
+		}
+		cards = append(cards, card)
+	}
+	gpuFillNUMANodes(i.ctx, cards)
+	gpuFillPCIDevice(i.ctx, cards)
+	i.GraphicsCards = cards
+	return nil
+}
+
+// Loops through each GraphicsCard struct and attempts to fill the DeviceInfo
+// attribute with PCI device information
+func gpuFillPCIDevice(ctx *context.Context, cards []*GraphicsCard) {
+	pci, err := pci.New(context.WithContext(ctx))
+	if err != nil {
+		ctx.Warn("failed to load PCI device database: %s", err)
+		return
+	}
+	for _, card := range cards {
+		if card.DeviceInfo == nil {
+			card.DeviceInfo = pci.GetDevice(card.Address)
+		}
+	}
+}
+
+// Loops through each GraphicsCard struct and finds which NUMA node the card
+// is affined to, setting the GraphicsCard.Node field accordingly. If the host
+// system is not a NUMA system, the Node field will be set to nil.
+func gpuFillNUMANodes(ctx *context.Context, cards []*GraphicsCard) {
+	paths := linuxpath.New(ctx)
+	topo, err := topology.New(context.WithContext(ctx))
+	if err != nil {
+		// Problem getting topology information so just set the graphics
+		// card's node to nil
+		for _, card := range cards {
+			card.Node = nil
+		}
+		return
+	}
+	for _, card := range cards {
+		// Each graphics card on a NUMA system will have a pseudo-file
+		// called /sys/class/drm/card$CARD_INDEX/device/numa_node which
+		// contains the NUMA node that the card is affined to
+		cardIndexStr := strconv.Itoa(card.Index)
+		fpath := filepath.Join(
+			paths.SysClassDRM,
+			"card"+cardIndexStr,
+			"device",
+			"numa_node",
+		)
+		nodeIdx := util.SafeIntFromFile(ctx, fpath)
+		if nodeIdx == -1 {
+			continue
+		}
+		for _, node := range topo.Nodes {
+			if nodeIdx == int(node.ID) {
+				card.Node = node
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
new file mode 100644
index 00000000..48991ec8
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+	"runtime"
+
+	"github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+	return errors.New("gpuFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
new file mode 100644
index 00000000..70e19918
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
@@ -0,0 +1,133 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+	"strings"
+
+	"github.com/StackExchange/wmi"
+	"github.com/jaypipes/pcidb"
+
+	"github.com/jaypipes/ghw/pkg/pci"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlVideoController = "SELECT Caption, CreationClassName, Description, DeviceID, DriverVersion, Name, PNPDeviceID, SystemCreationClassName, SystemName, VideoArchitecture, VideoMemoryType, VideoModeDescription, VideoProcessor FROM Win32_VideoController"
+
+type win32VideoController struct {
+	Caption                 string
+	CreationClassName       string
+	Description             string
+	DeviceID                string
+	DriverVersion           string
+	Name                    string
+	PNPDeviceID             string
+	SystemCreationClassName string
+	SystemName              string
+	VideoArchitecture       uint16
+	VideoMemoryType         uint16
+	VideoModeDescription    string
+	VideoProcessor          string
+}
+
+const wqlPnPEntity = "SELECT Caption, CreationClassName, Description, DeviceID, Manufacturer, Name, PNPClass, PNPDeviceID FROM Win32_PnPEntity"
+
+type win32PnPEntity struct {
+	Caption           string
+	CreationClassName string
+	Description       string
+	DeviceID          string
+	Manufacturer      string
+	Name              string
+	PNPClass          string
+	PNPDeviceID       string
+}
+
+func (i *Info) load() error {
+	// Getting data from WMI
+	var win32VideoControllerDescriptions []win32VideoController
+	if err := wmi.Query(wqlVideoController, &win32VideoControllerDescriptions); err != nil {
+		return err
+	}
+
+	// Building a dynamic WHERE clause with addresses to create a single query
+	// collecting all desired data
+	queryAddresses := []string{}
+	for _, description := range win32VideoControllerDescriptions {
+		var queryAddres = strings.Replace(description.PNPDeviceID, "\\", `\\`, -1)
+		queryAddresses = append(queryAddresses, "PNPDeviceID='"+queryAddres+"'")
+	}
+	whereClause := strings.Join(queryAddresses[:], " OR ")
+
+	// Getting data from WMI
+	var win32PnPDescriptions []win32PnPEntity
+	var wqlPnPDevice = wqlPnPEntity + " WHERE " + whereClause
+	if err := wmi.Query(wqlPnPDevice, &win32PnPDescriptions); err != nil {
+		return err
+	}
+
+	// Converting into standard structures
+	cards := make([]*GraphicsCard, 0)
+	for _, description := range win32VideoControllerDescriptions {
+		card := &GraphicsCard{
+			Address:    description.DeviceID, // https://stackoverflow.com/questions/32073667/how-do-i-discover-the-pcie-bus-topology-and-slot-numbers-on-the-board
+			Index:      0,
+			DeviceInfo: GetDevice(description.PNPDeviceID, win32PnPDescriptions),
+		}
+		card.DeviceInfo.Driver = description.DriverVersion
+		cards = append(cards, card)
+	}
+	i.GraphicsCards = cards
+	return nil
+}
+
+func GetDevice(id string, entities []win32PnPEntity) *pci.Device {
+	// Backslashing the PnP address ID as required by JSON and the WMI query:
+	// https://docs.microsoft.com/en-us/windows/win32/wmisdk/where-clause
+	var queryAddress = strings.Replace(id, "\\", `\\`, -1)
+	// Preparing default structure
+	var device = &pci.Device{
+		Address: queryAddress,
+		Vendor: &pcidb.Vendor{
+			ID:       util.UNKNOWN,
+			Name:     util.UNKNOWN,
+			Products: []*pcidb.Product{},
+		},
+		Subsystem: &pcidb.Product{
+			ID:         util.UNKNOWN,
+			Name:       util.UNKNOWN,
+			Subsystems: []*pcidb.Product{},
+		},
+		Product: &pcidb.Product{
+			ID:         util.UNKNOWN,
+			Name:       util.UNKNOWN,
+			Subsystems: []*pcidb.Product{},
+		},
+		Class: &pcidb.Class{
+			ID:         util.UNKNOWN,
+			Name:       util.UNKNOWN,
+			Subclasses: []*pcidb.Subclass{},
+		},
+		Subclass: &pcidb.Subclass{
+			ID:                    util.UNKNOWN,
+			Name:                  util.UNKNOWN,
+			ProgrammingInterfaces: []*pcidb.ProgrammingInterface{},
+		},
+		ProgrammingInterface: &pcidb.ProgrammingInterface{
+			ID:   util.UNKNOWN,
+			Name: util.UNKNOWN,
+		},
+	}
+	// If an entity is found, copy its data into the standard structure
+	for _, description := range entities {
+		if id == description.PNPDeviceID {
+			device.Vendor.ID = description.Manufacturer
+			device.Vendor.Name = description.Manufacturer
+			device.Product.ID = description.Name
+			device.Product.Name = description.Description
+			break
+		}
+	}
+	return device
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
new file mode 100644
index 00000000..8e6d8302
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
@@ -0,0 +1,29 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxdmi
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/linuxpath"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+func Item(ctx *context.Context, value string) string {
+	paths := linuxpath.New(ctx)
+	path := filepath.Join(paths.SysClassDMI, "id", value)
+
+	b, err := os.ReadFile(path)
+	if err != nil {
+		ctx.Warn("Unable to read %s: %s\n", value, err)
+		return util.UNKNOWN
+	}
+
+	return strings.TrimSpace(string(b))
+}
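(Aside, not part of the vendored code: Item is the whole public surface of this package. A Linux-only sketch reading the same DMI attributes the chassis loader above uses, assuming context.New supplies a usable default context:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/context"
	"github.com/jaypipes/ghw/pkg/linuxdmi"
)

func main() {
	ctx := context.New()
	// Item returns util.UNKNOWN (and warns) when the sysfs file is missing.
	fmt.Println("vendor:", linuxdmi.Item(ctx, "chassis_vendor"))
	fmt.Println("type:  ", linuxdmi.Item(ctx, "chassis_type"))
}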
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
new file mode 100644
index 00000000..bbe81b64
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
@@ -0,0 +1,117 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxpath
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/jaypipes/ghw/pkg/context"
+)
+
+// PathRoots holds the roots of all the filesystem subtrees
+// ghw wants to access.
+type PathRoots struct {
+	Etc  string
+	Proc string
+	Run  string
+	Sys  string
+	Var  string
+}
+
+// DefaultPathRoots returns the canonical default value for PathRoots
+func DefaultPathRoots() PathRoots {
+	return PathRoots{
+		Etc:  "/etc",
+		Proc: "/proc",
+		Run:  "/run",
+		Sys:  "/sys",
+		Var:  "/var",
+	}
+}
+
+// PathRootsFromContext initializes PathRoots from the given Context,
+// allowing overrides of the canonical default paths.
+func PathRootsFromContext(ctx *context.Context) PathRoots {
+	roots := DefaultPathRoots()
+	if pathEtc, ok := ctx.PathOverrides["/etc"]; ok {
+		roots.Etc = pathEtc
+	}
+	if pathProc, ok := ctx.PathOverrides["/proc"]; ok {
+		roots.Proc = pathProc
+	}
+	if pathRun, ok := ctx.PathOverrides["/run"]; ok {
+		roots.Run = pathRun
+	}
+	if pathSys, ok := ctx.PathOverrides["/sys"]; ok {
+		roots.Sys = pathSys
+	}
+	if pathVar, ok := ctx.PathOverrides["/var"]; ok {
+		roots.Var = pathVar
+	}
+	return roots
+}
+
+type Paths struct {
+	VarLog                 string
+	ProcMeminfo            string
+	ProcCpuinfo            string
+	ProcMounts             string
+	SysKernelMMHugepages   string
+	SysBlock               string
+	SysDevicesSystemNode   string
+	SysDevicesSystemMemory string
+	SysDevicesSystemCPU    string
+	SysBusPciDevices       string
+	SysClassDRM            string
+	SysClassDMI            string
+	SysClassNet            string
+	RunUdevData            string
+}
+
+// New returns a new Paths struct containing filepath fields relative to the
+// supplied Context
+func New(ctx *context.Context) *Paths {
+	roots := PathRootsFromContext(ctx)
+	return &Paths{
+		VarLog:                 filepath.Join(ctx.Chroot, roots.Var, "log"),
+		ProcMeminfo:            filepath.Join(ctx.Chroot, roots.Proc, "meminfo"),
+		ProcCpuinfo:            filepath.Join(ctx.Chroot, roots.Proc, "cpuinfo"),
+		ProcMounts:             filepath.Join(ctx.Chroot, roots.Proc, "self", "mounts"),
+		SysKernelMMHugepages:   filepath.Join(ctx.Chroot, roots.Sys, "kernel", "mm", "hugepages"),
+		SysBlock:               filepath.Join(ctx.Chroot, roots.Sys, "block"),
+		SysDevicesSystemNode:   filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "node"),
+		SysDevicesSystemMemory: filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "memory"),
+		SysDevicesSystemCPU:    filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "cpu"),
+		SysBusPciDevices:       filepath.Join(ctx.Chroot, roots.Sys, "bus", "pci", "devices"),
+		SysClassDRM:            filepath.Join(ctx.Chroot, roots.Sys, "class", "drm"),
+		SysClassDMI:            filepath.Join(ctx.Chroot, roots.Sys, "class", "dmi"),
+		SysClassNet:            filepath.Join(ctx.Chroot, roots.Sys, "class", "net"),
+		RunUdevData:            filepath.Join(ctx.Chroot, roots.Run, "udev", "data"),
+	}
+}
+
+func (p *Paths) NodeCPU(nodeID int, lpID int) string {
+	return filepath.Join(
+		p.SysDevicesSystemNode,
+		fmt.Sprintf("node%d", nodeID),
+		fmt.Sprintf("cpu%d", lpID),
+	)
+}
+
+func (p *Paths) NodeCPUCache(nodeID int, lpID int) string {
+	return filepath.Join(
+		p.NodeCPU(nodeID, lpID),
+		"cache",
+	)
+}
+
+func (p *Paths) NodeCPUCacheIndex(nodeID int, lpID int, cacheIndex int) string {
+	return filepath.Join(
+		p.NodeCPUCache(nodeID, lpID),
+		fmt.Sprintf("index%d", cacheIndex),
+	)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
new file mode 100644
index 00000000..e442d6af
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
@@ -0,0 +1,55 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package marshal
+
+import (
+	"encoding/json"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	yaml "gopkg.in/yaml.v3"
+)
+
+// SafeYAML returns a string after marshalling the supplied parameter into
+// YAML.
+func SafeYAML(ctx *context.Context, p interface{}) string {
+	b, err := json.Marshal(p)
+	if err != nil {
+		ctx.Warn("error marshalling JSON: %s", err)
+		return ""
+	}
+
+	var jsonObj interface{}
+	if err := yaml.Unmarshal(b, &jsonObj); err != nil {
+		ctx.Warn("error converting JSON to YAML: %s", err)
+		return ""
+	}
+
+	yb, err := yaml.Marshal(jsonObj)
+	if err != nil {
+		ctx.Warn("error marshalling YAML: %s", err)
+		return ""
+	}
+
+	return string(yb)
+}
+
+// SafeJSON returns a string after marshalling the supplied parameter into
+// JSON. Accepts an optional argument to trigger pretty/indented formatting of
+// the JSON string.
+func SafeJSON(ctx *context.Context, p interface{}, indent bool) string {
+	var b []byte
+	var err error
+	if !indent {
+		b, err = json.Marshal(p)
+	} else {
+		b, err = json.MarshalIndent(&p, "", "  ")
+	}
+	if err != nil {
+		ctx.Warn("error marshalling JSON: %s", err)
+		return ""
+	}
+	return string(b)
+}
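(Aside, not part of the vendored code: SafeYAML round-trips through JSON first, so JSON struct tags drive both output formats. A minimal sketch with a plain map:)

package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/context"
	"github.com/jaypipes/ghw/pkg/marshal"
)

func main() {
	ctx := context.New()
	payload := map[string]interface{}{"vendor": "GenuineIntel", "cores": 8}
	fmt.Println(marshal.SafeJSON(ctx, payload, true)) // indented JSON
	fmt.Println(marshal.SafeYAML(ctx, payload))       // same data as YAML
}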
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
new file mode 100644
index 00000000..f58ba8b9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
@@ -0,0 +1,116 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/marshal"
+	"github.com/jaypipes/ghw/pkg/option"
+	"github.com/jaypipes/ghw/pkg/unitutil"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+// Module describes a single physical memory module for a host system. Pretty
+// much all modern systems contain dual in-line memory modules (DIMMs).
+//
+// See https://en.wikipedia.org/wiki/DIMM
+type Module struct {
+	Label        string `json:"label"`
+	Location     string `json:"location"`
+	SerialNumber string `json:"serial_number"`
+	SizeBytes    int64  `json:"size_bytes"`
+	Vendor       string `json:"vendor"`
+}
+
+// HugePageAmounts describes huge page info
+type HugePageAmounts struct {
+	Total   int64 `json:"total"`
+	Free    int64 `json:"free"`
+	Surplus int64 `json:"surplus"`
+	// Note: this field is not populated by the Topology call, since the data
+	// is not present in the NUMA folder structure
+	Reserved int64 `json:"reserved"`
+}
+
+// Area describes a set of physical memory on a host system. Non-NUMA systems
+// will almost always have a single memory area containing all memory the
+// system can use. NUMA systems will have multiple memory areas, one or more
+// for each NUMA node/cell in the system.
+type Area struct {
+	TotalPhysicalBytes int64 `json:"total_physical_bytes"`
+	TotalUsableBytes   int64 `json:"total_usable_bytes"`
+	// An array of sizes, in bytes, of memory pages supported in this area
+	SupportedPageSizes []uint64 `json:"supported_page_sizes"`
+	// Default system huge page size, in bytes
+	DefaultHugePageSize uint64 `json:"default_huge_page_size"`
+	// Amount of memory, in bytes, consumed by huge pages of all sizes
+	TotalHugePageBytes int64 `json:"total_huge_page_bytes"`
+	// Huge page info by size
+	HugePageAmountsBySize map[uint64]*HugePageAmounts `json:"huge_page_amounts_by_size"`
+	Modules               []*Module                   `json:"modules"`
+}
+
+// String returns a short string with a summary of information for this memory
+// area
+func (a *Area) String() string {
+	tpbs := util.UNKNOWN
+	if a.TotalPhysicalBytes > 0 {
+		tpb := a.TotalPhysicalBytes
+		unit, unitStr := unitutil.AmountString(tpb)
+		tpb = int64(math.Ceil(float64(a.TotalPhysicalBytes) / float64(unit)))
+		tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
+	}
+	tubs := util.UNKNOWN
+	if a.TotalUsableBytes > 0 {
+		tub := a.TotalUsableBytes
+		unit, unitStr := unitutil.AmountString(tub)
+		tub = int64(math.Ceil(float64(a.TotalUsableBytes) / float64(unit)))
+		tubs = fmt.Sprintf("%d%s", tub, unitStr)
+	}
+	return fmt.Sprintf("memory (%s physical, %s usable)", tpbs, tubs)
+}
+
+// Info contains information about the memory on a host system.
+type Info struct {
+	ctx *context.Context
+	Area
+}
+
+// New returns an Info struct that describes the memory on a host system.
+func New(opts ...*option.Option) (*Info, error) {
+	ctx := context.New(opts...)
+	info := &Info{ctx: ctx}
+	if err := ctx.Do(info.load); err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+// String returns a short string with a summary of memory information
+func (i *Info) String() string {
+	return i.Area.String()
+}
+
+// simple private struct used to encapsulate memory information in a top-level
+// "memory" YAML/JSON map/object key
+type memoryPrinter struct {
+	Info *Info `json:"memory"`
+}
+
+// YAMLString returns a string with the memory information formatted as YAML
+// under a top-level "memory:" key
+func (i *Info) YAMLString() string {
+	return marshal.SafeYAML(i.ctx, memoryPrinter{i})
+}
+
+// JSONString returns a string with the memory information formatted as JSON
+// under a top-level "memory:" key
+func (i *Info) JSONString(indent bool) string {
+	return marshal.SafeJSON(i.ctx, memoryPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
new file mode 100644
index 00000000..2d8f88d7
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
@@ -0,0 +1,155 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+// CacheType indicates the type of memory stored in a memory cache.
+type CacheType int
+
+const (
+	// CacheTypeUnified indicates the memory cache stores both instructions
+	// and data.
+	CacheTypeUnified CacheType = iota
+	// CacheTypeInstruction indicates the memory cache stores only
+	// instructions (executable bytecode).
+	CacheTypeInstruction
+	// CacheTypeData indicates the memory cache stores only data
+	// (non-executable bytecode).
+	CacheTypeData
+)
+
+const (
+	// DEPRECATED: Please use CacheTypeUnified
+	CACHE_TYPE_UNIFIED = CacheTypeUnified
+	// DEPRECATED: Please use CacheTypeInstruction
+	CACHE_TYPE_INSTRUCTION = CacheTypeInstruction
+	// DEPRECATED: Please use CacheTypeData
+	CACHE_TYPE_DATA = CacheTypeData
+)
+
+var (
+	memoryCacheTypeString = map[CacheType]string{
+		CacheTypeUnified:     "Unified",
+		CacheTypeInstruction: "Instruction",
+		CacheTypeData:        "Data",
+	}
+
+	// NOTE(fromani): the keys are all lowercase and do not match
+	// the keys in the opposite table `memoryCacheTypeString`.
+	// This is done because of the choice we made in
+	// CacheType:MarshalJSON.
+	// We use this table only in UnmarshalJSON, so it should be OK.
+	stringMemoryCacheType = map[string]CacheType{
+		"unified":     CacheTypeUnified,
+		"instruction": CacheTypeInstruction,
+		"data":        CacheTypeData,
+	}
+)
+
+func (a CacheType) String() string {
+	return memoryCacheTypeString[a]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (a CacheType) MarshalJSON() ([]byte, error) {
+	return []byte(strconv.Quote(strings.ToLower(a.String()))), nil
+}
+
+func (a *CacheType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	key := strings.ToLower(s)
+	val, ok := stringMemoryCacheType[key]
+	if !ok {
+		return fmt.Errorf("unknown memory cache type: %q", key)
+	}
+	*a = val
+	return nil
+}
+
+type SortByCacheLevelTypeFirstProcessor []*Cache
+
+func (a SortByCacheLevelTypeFirstProcessor) Len() int      { return len(a) }
+func (a SortByCacheLevelTypeFirstProcessor) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortByCacheLevelTypeFirstProcessor) Less(i, j int) bool {
+	if a[i].Level < a[j].Level {
+		return true
+	} else if a[i].Level == a[j].Level {
+		if a[i].Type < a[j].Type {
+			return true
+		} else if a[i].Type == a[j].Type {
+			// NOTE(jaypipes): len(LogicalProcessors) is always >0 and is always
+			// sorted lowest LP ID to highest LP ID
+			return a[i].LogicalProcessors[0] < a[j].LogicalProcessors[0]
+		}
+	}
+	return false
+}
+
+type SortByLogicalProcessorId []uint32
+
+func (a SortByLogicalProcessorId) Len() int           { return len(a) }
+func (a SortByLogicalProcessorId) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a SortByLogicalProcessorId) Less(i, j int) bool { return a[i] < a[j] }
+
+// Cache contains information about a single memory cache on a physical CPU
+// package. Caches have a 1-based numeric level, with lower numbers indicating
+// the cache is "closer" to the processing cores and reading memory from the
+// cache will be faster relative to caches with higher levels. Note that this
+// has nothing to do with RAM or memory modules like DIMMs.
+type Cache struct {
+	// Level is a 1-based numeric level that indicates the relative closeness
+	// of this cache to processing cores on the physical package. Lower numbers
+	// are "closer" to the processing cores and therefore have faster access
+	// times.
+	Level uint8 `json:"level"`
+	// Type indicates what type of memory is stored in the cache. Can be
+	// instruction (executable bytecodes), data or both.
+	Type CacheType `json:"type"`
+	// SizeBytes indicates the size of the cache in bytes.
+	SizeBytes uint64 `json:"size_bytes"`
+	// The set of logical processors (hardware threads) that have access to
+	// this cache.
+	LogicalProcessors []uint32 `json:"logical_processors"`
+}
+
+func (c *Cache) String() string {
+	sizeKb := c.SizeBytes / uint64(unitutil.KB)
+	typeStr := ""
+	if c.Type == CacheTypeInstruction {
+		typeStr = "i"
+	} else if c.Type == CacheTypeData {
+		typeStr = "d"
+	}
+	cacheIDStr := fmt.Sprintf("L%d%s", c.Level, typeStr)
+	processorMapStr := ""
+	if c.LogicalProcessors != nil {
+		lpStrings := make([]string, len(c.LogicalProcessors))
+		for x, lpid := range c.LogicalProcessors {
+			lpStrings[x] = strconv.Itoa(int(lpid))
+		}
+		processorMapStr = " shared with logical processors: " + strings.Join(lpStrings, ",")
+	}
+	return fmt.Sprintf(
+		"%s cache (%d KB)%s",
+		cacheIDStr,
+		sizeKb,
+		processorMapStr,
+	)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
new file mode 100644
index 00000000..12258ea4
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
@@ -0,0 +1,187 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/linuxpath"
+	"github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+func CachesForNode(ctx *context.Context, nodeID int) ([]*Cache, error) {
+	// The /sys/devices/system/node/nodeX directory contains a subdirectory
+	// called 'cpuX' for each logical processor assigned to the node. Each of
+	// those subdirectories contains a 'cache' subdirectory which contains a
+	// number of subdirectories beginning with 'index' and ending in the
+	// cache's internal 0-based identifier. Those subdirectories contain a
+	// number of files, including 'shared_cpu_list', 'size', and 'type' which
+	// we use to determine cache characteristics.
+	paths := linuxpath.New(ctx)
+	path := filepath.Join(
+		paths.SysDevicesSystemNode,
+		fmt.Sprintf("node%d", nodeID),
+	)
+	caches := make(map[string]*Cache)
+
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+	for _, file := range files {
+		filename := file.Name()
+		if !strings.HasPrefix(filename, "cpu") {
+			continue
+		}
+		if filename == "cpumap" || filename == "cpulist" {
+			// There are two files in the node directory that start with 'cpu'
+			// but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+			// these files.
+			continue
+		}
+		// Grab the logical processor ID by cutting the integer from the
+		// /sys/devices/system/node/nodeX/cpuX filename
+		cpuPath := filepath.Join(path, filename)
+		lpID, _ := strconv.Atoi(filename[3:])
+
+		// Inspect the caches for each logical processor. There will be a
+		// /sys/devices/system/node/nodeX/cpuX/cache directory containing a
+		// number of directories beginning with the prefix "index" followed by
+		// a number. The number indicates the level of the cache, which
+		// indicates the "distance" from the processor. Each of these
+		// directories contains information about the size of that level of
+		// cache and the processors mapped to it.
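+		// For example, on a typical x86_64 host the cache directory looks
+		// roughly like this (the exact index-to-level mapping varies by CPU):
+		//
+		//   .../node0/cpu0/cache/index0  L1 data cache
+		//   .../node0/cpu0/cache/index1  L1 instruction cache
+		//   .../node0/cpu0/cache/index2  L2 unified cache
+		//   .../node0/cpu0/cache/index3  L3 unified cache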
+		cachePath := filepath.Join(cpuPath, "cache")
+		if _, err = os.Stat(cachePath); errors.Is(err, os.ErrNotExist) {
+			continue
+		}
+		cacheDirFiles, err := os.ReadDir(cachePath)
+		if err != nil {
+			return nil, err
+		}
+		for _, cacheDirFile := range cacheDirFiles {
+			cacheDirFileName := cacheDirFile.Name()
+			if !strings.HasPrefix(cacheDirFileName, "index") {
+				continue
+			}
+			cacheIndex, _ := strconv.Atoi(cacheDirFileName[5:])
+
+			// The cache information is repeated for each node, so here, we
+			// just ensure that we only have one Cache object for each
+			// unique combination of level, type and processor map
+			level := memoryCacheLevel(ctx, paths, nodeID, lpID, cacheIndex)
+			cacheType := memoryCacheType(ctx, paths, nodeID, lpID, cacheIndex)
+			sharedCpuMap := memoryCacheSharedCPUMap(ctx, paths, nodeID, lpID, cacheIndex)
+			cacheKey := fmt.Sprintf("%d-%d-%s", level, cacheType, sharedCpuMap)
+
+			cache, exists := caches[cacheKey]
+			if !exists {
+				size := memoryCacheSize(ctx, paths, nodeID, lpID, level)
+				cache = &Cache{
+					Level:             uint8(level),
+					Type:              cacheType,
+					SizeBytes:         uint64(size) * uint64(unitutil.KB),
+					LogicalProcessors: make([]uint32, 0),
+				}
+				caches[cacheKey] = cache
+			}
+			cache.LogicalProcessors = append(
+				cache.LogicalProcessors,
+				uint32(lpID),
+			)
+		}
+	}
+
+	cacheVals := make([]*Cache, len(caches))
+	x := 0
+	for _, c := range caches {
+		// ensure the cache's processor set is sorted by logical processor ID
+		sort.Sort(SortByLogicalProcessorId(c.LogicalProcessors))
+		cacheVals[x] = c
+		x++
+	}
+
+	return cacheVals, nil
+}
+
+func memoryCacheLevel(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+	levelPath := filepath.Join(
+		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+		"level",
+	)
+	levelContents, err := os.ReadFile(levelPath)
+	if err != nil {
+		ctx.Warn("%s", err)
+		return -1
+	}
+	// levelContents is now a []byte with the last byte being a newline
+	// character. Trim that off and convert the contents to an integer.
+	level, err := strconv.Atoi(string(levelContents[:len(levelContents)-1]))
+	if err != nil {
+		ctx.Warn("Unable to parse int from %s", levelContents)
+		return -1
+	}
+	return level
+}
+
+func memoryCacheSize(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+	sizePath := filepath.Join(
+		paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+		"size",
+	)
+	sizeContents, err := os.ReadFile(sizePath)
+	if err != nil {
+		ctx.Warn("%s", err)
+		return -1
+	}
+	// size comes as XK\n, so we trim off the K and the newline.
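+	// For example, a size file containing "32K\n" is sliced down to "32"
+	// here and parsed to 32, which the caller scales by unitutil.KB.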
+ size, err := strconv.Atoi(string(sizeContents[:len(sizeContents)-2])) + if err != nil { + ctx.Warn("Unable to parse int from %s", sizeContents) + return -1 + } + return size +} + +func memoryCacheType(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) CacheType { + typePath := filepath.Join( + paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex), + "type", + ) + cacheTypeContents, err := os.ReadFile(typePath) + if err != nil { + ctx.Warn("%s", err) + return CacheTypeUnified + } + switch string(cacheTypeContents[:len(cacheTypeContents)-1]) { + case "Data": + return CacheTypeData + case "Instruction": + return CacheTypeInstruction + default: + return CacheTypeUnified + } +} + +func memoryCacheSharedCPUMap(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) string { + scpuPath := filepath.Join( + paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex), + "shared_cpu_map", + ) + sharedCpuMap, err := os.ReadFile(scpuPath) + if err != nil { + ctx.Warn("%s", err) + return "" + } + return string(sharedCpuMap[:len(sharedCpuMap)-1]) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go new file mode 100644 index 00000000..53acfd5c --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go @@ -0,0 +1,446 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package memory + +import ( + "bufio" + "compress/gzip" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/linuxpath" + "github.com/jaypipes/ghw/pkg/unitutil" + "github.com/jaypipes/ghw/pkg/util" +) + +const ( + warnCannotDeterminePhysicalMemory = ` +Could not determine total physical bytes of memory. This may +be due to the host being a virtual machine or container with no +/var/log/syslog file or /sys/devices/system/memory directory, or +the current user may not have necessary privileges to read the syslog. +We are falling back to setting the total physical amount of memory to +the total usable amount of memory +` +) + +var ( + // System log lines will look similar to the following: + // ... kernel: [0.000000] Memory: 24633272K/25155024K ... 
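+	// The regexp below captures the figure after the slash (the total
+	// physical kB); e.g. the sample line above yields "25155024".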
+	regexSyslogMemline = regexp.MustCompile(`Memory:\s+\d+K\/(\d+)K`)
+	// regexMemoryBlockDirname matches a subdirectory in either
+	// /sys/devices/system/memory or /sys/devices/system/node/nodeX that
+	// represents information on a specific memory cell/block
+	regexMemoryBlockDirname = regexp.MustCompile(`memory\d+$`)
+)
+
+func (i *Info) load() error {
+	paths := linuxpath.New(i.ctx)
+	tub := memTotalUsableBytes(paths)
+	if tub < 1 {
+		return fmt.Errorf("Could not determine total usable bytes of memory")
+	}
+	i.TotalUsableBytes = tub
+	tpb := memTotalPhysicalBytes(paths)
+	i.TotalPhysicalBytes = tpb
+	if tpb < 1 {
+		i.ctx.Warn(warnCannotDeterminePhysicalMemory)
+		i.TotalPhysicalBytes = tub
+	}
+	i.SupportedPageSizes, _ = memorySupportedPageSizes(paths.SysKernelMMHugepages)
+	i.DefaultHugePageSize, _ = memoryDefaultHPSizeFromPath(paths.ProcMeminfo)
+	i.TotalHugePageBytes, _ = memoryHugeTLBFromPath(paths.ProcMeminfo)
+	hugePageAmounts := make(map[uint64]*HugePageAmounts)
+	for _, p := range i.SupportedPageSizes {
+		info, err := memoryHPInfo(paths.SysKernelMMHugepages, p)
+		if err != nil {
+			return err
+		}
+		hugePageAmounts[p] = info
+	}
+	i.HugePageAmountsBySize = hugePageAmounts
+	return nil
+}
+
+func AreaForNode(ctx *context.Context, nodeID int) (*Area, error) {
+	paths := linuxpath.New(ctx)
+	path := filepath.Join(
+		paths.SysDevicesSystemNode,
+		fmt.Sprintf("node%d", nodeID),
+	)
+
+	var err error
+	var blockSizeBytes uint64
+	var totPhys int64
+	var totUsable int64
+
+	totUsable, err = memoryTotalUsableBytesFromPath(filepath.Join(path, "meminfo"))
+	if err != nil {
+		return nil, err
+	}
+
+	blockSizeBytes, err = memoryBlockSizeBytes(paths.SysDevicesSystemMemory)
+	if err == nil {
+		totPhys, err = memoryTotalPhysicalBytesFromPath(path, blockSizeBytes)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// NOTE(jaypipes): Some platforms (e.g. ARM) will not have a
+		// /sys/devices/system/memory/block_size_bytes file. If this is the
+		// case, we set physical bytes equal to either the physical memory
+		// determined from syslog or the usable bytes
+		//
+		// see: https://bugzilla.redhat.com/show_bug.cgi?id=1794160
+		// see: https://github.com/jaypipes/ghw/issues/336
+		totPhys = memTotalPhysicalBytesFromSyslog(paths)
+	}
+
+	supportedHP, err := memorySupportedPageSizes(filepath.Join(path, "hugepages"))
+	if err != nil {
+		return nil, err
+	}
+
+	defHPSize, err := memoryDefaultHPSizeFromPath(paths.ProcMeminfo)
+	if err != nil {
+		return nil, err
+	}
+
+	totHPSize, err := memoryHugeTLBFromPath(paths.ProcMeminfo)
+	if err != nil {
+		return nil, err
+	}
+
+	hugePageAmounts := make(map[uint64]*HugePageAmounts)
+	for _, p := range supportedHP {
+		info, err := memoryHPInfo(filepath.Join(path, "hugepages"), p)
+		if err != nil {
+			return nil, err
+		}
+		hugePageAmounts[p] = info
+	}
+
+	return &Area{
+		TotalPhysicalBytes:    totPhys,
+		TotalUsableBytes:      totUsable,
+		SupportedPageSizes:    supportedHP,
+		DefaultHugePageSize:   defHPSize,
+		TotalHugePageBytes:    totHPSize,
+		HugePageAmountsBySize: hugePageAmounts,
+	}, nil
+}
+
+func memoryBlockSizeBytes(dir string) (uint64, error) {
+	// get the memory block size in bytes, in hexadecimal notation
+	blockSize := filepath.Join(dir, "block_size_bytes")
+
+	d, err := os.ReadFile(blockSize)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(d)), 16, 64)
+}
+
+func memTotalPhysicalBytes(paths *linuxpath.Paths) (total int64) {
+	defer func() {
+		// fallback to the syslog file approach in case of error
+		if total < 0 {
+			total = memTotalPhysicalBytesFromSyslog(paths)
+		}
+	}()
+
+	// detect physical memory from /sys/devices/system/memory
+	dir := paths.SysDevicesSystemMemory
+	blockSizeBytes, err := memoryBlockSizeBytes(dir)
+	if err != nil {
+		total = -1
+		return total
+	}
+
+	total, err = memoryTotalPhysicalBytesFromPath(dir, blockSizeBytes)
+	if err != nil {
+		total = -1
+	}
+	return total
+}
+
+// memoryTotalPhysicalBytesFromPath accepts a directory -- either
+// /sys/devices/system/memory (for the entire system) or
+// /sys/devices/system/node/nodeX (for a specific NUMA node) -- and a block
+// size in bytes and iterates over the sysfs memory block subdirectories,
+// accumulating blocks that are "online" to determine a total physical memory
+// size in bytes
+func memoryTotalPhysicalBytesFromPath(dir string, blockSizeBytes uint64) (int64, error) {
+	var total int64
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		return -1, err
+	}
+	// There are many subdirectories of /sys/devices/system/memory or
+	// /sys/devices/system/node/nodeX that are named memory{cell} where {cell}
+	// is a 0-based index of the memory block. These subdirectories contain a
+	// state file (e.g. /sys/devices/system/memory/memory64/state) that will
+	// contain the string "online" if that block is active.
+	for _, file := range files {
+		fname := file.Name()
+		// NOTE(jaypipes): we cannot rely on file.IsDir() here because the
+		// memory{cell} sysfs directories are not actual directories.
+		if !regexMemoryBlockDirname.MatchString(fname) {
+			continue
+		}
+		s, err := os.ReadFile(filepath.Join(dir, fname, "state"))
+		if err != nil {
+			return -1, err
+		}
+		// if the memory block state is 'online' we increment the total with
+		// the memory block size to determine the amount of physical
+		// memory available on this system.
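+		// For instance, with the common x86_64 block size of 128MiB, sixteen
+		// online blocks account for 2GiB of physical memory.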
+ if strings.TrimSpace(string(s)) != "online" { + continue + } + total += int64(blockSizeBytes) + } + return total, nil +} + +func memTotalPhysicalBytesFromSyslog(paths *linuxpath.Paths) int64 { + // In Linux, the total physical memory can be determined by looking at the + // output of dmidecode, however dmidecode requires root privileges to run, + // so instead we examine the system logs for startup information containing + // total physical memory and cache the results of this. + findPhysicalKb := func(line string) int64 { + matches := regexSyslogMemline.FindStringSubmatch(line) + if len(matches) == 2 { + i, err := strconv.Atoi(matches[1]) + if err != nil { + return -1 + } + return int64(i * 1024) + } + return -1 + } + + // /var/log will contain a file called syslog and 0 or more files called + // syslog.$NUMBER or syslog.$NUMBER.gz containing system log records. We + // search each, stopping when we match a system log record line that + // contains physical memory information. + logDir := paths.VarLog + logFiles, err := os.ReadDir(logDir) + if err != nil { + return -1 + } + for _, file := range logFiles { + if strings.HasPrefix(file.Name(), "syslog") { + fullPath := filepath.Join(logDir, file.Name()) + unzip := strings.HasSuffix(file.Name(), ".gz") + var r io.ReadCloser + r, err = os.Open(fullPath) + if err != nil { + return -1 + } + defer util.SafeClose(r) + if unzip { + r, err = gzip.NewReader(r) + if err != nil { + return -1 + } + } + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + size := findPhysicalKb(line) + if size > 0 { + return size + } + } + } + } + return -1 +} + +func memTotalUsableBytes(paths *linuxpath.Paths) int64 { + amount, err := memoryTotalUsableBytesFromPath(paths.ProcMeminfo) + if err != nil { + return -1 + } + return amount +} + +func memorySupportedPageSizes(hpDir string) ([]uint64, error) { + // In Linux, /sys/kernel/mm/hugepages contains a directory per page size + // supported by the kernel. 
The directory name corresponds to the pattern + // 'hugepages-{pagesize}kb' + out := make([]uint64, 0) + + files, err := os.ReadDir(hpDir) + if err != nil { + return out, err + } + for _, file := range files { + parts := strings.Split(file.Name(), "-") + sizeStr := parts[1] + // Cut off the 'kb' + sizeStr = sizeStr[0 : len(sizeStr)-2] + size, err := strconv.Atoi(sizeStr) + if err != nil { + return out, err + } + out = append(out, uint64(size*int(unitutil.KB))) + } + return out, nil +} + +func memoryHPInfo(hpDir string, sizeBytes uint64) (*HugePageAmounts, error) { + // In linux huge page info can be obtained in several places + // /sys/kernel/mm/hugepages/hugepages-{pagesize}kb/ directory, which contains + // nr_hugepages + // nr_hugepages_mempolicy + // nr_overcommit_hugepages + // free_hugepages + // resv_hugepages + // surplus_hugepages + // or NUMA specific data /sys/devices/system/node/node[0-9]*/hugepages/hugepages-{pagesize}kb/, which contains + // nr_hugepages + // free_hugepages + // surplus_hugepages + targetPath := filepath.Join(hpDir, fmt.Sprintf("hugepages-%vkB", sizeBytes/uint64(unitutil.KB))) + files, err := os.ReadDir(targetPath) + if err != nil { + return nil, err + } + + var ( + total int64 + free int64 + surplus int64 + reserved int64 + ) + + for _, f := range files { + switch f.Name() { + case "nr_hugepages": + count, err := readFileToInt64(path.Join(targetPath, f.Name())) + if err != nil { + return nil, err + } + total = count + case "free_hugepages": + count, err := readFileToInt64(path.Join(targetPath, f.Name())) + if err != nil { + return nil, err + } + free = count + case "surplus_hugepages": + count, err := readFileToInt64(path.Join(targetPath, f.Name())) + if err != nil { + return nil, err + } + surplus = count + case "resv_hugepages": + count, err := readFileToInt64(path.Join(targetPath, f.Name())) + if err != nil { + return nil, err + } + reserved = count + } + } + + return &HugePageAmounts{ + Total: total, + Free: free, + Surplus: surplus, + Reserved: reserved, + }, nil +} + +func memoryTotalUsableBytesFromPath(meminfoPath string) (int64, error) { + const key = "MemTotal" + return getMemInfoField(meminfoPath, key) +} + +func memoryDefaultHPSizeFromPath(meminfoPath string) (uint64, error) { + const key = "Hugepagesize" + got, err := getMemInfoField(meminfoPath, key) + if err != nil { + return 0, err + } + return uint64(got), nil +} + +func memoryHugeTLBFromPath(meminfoPath string) (int64, error) { + const key = "Hugetlb" + return getMemInfoField(meminfoPath, key) +} + +func getMemInfoField(meminfoPath string, wantKey string) (int64, error) { + // In Linux, /proc/meminfo or its close relative + // /sys/devices/system/node/node*/meminfo + // contains a set of memory-related amounts, with + // lines looking like the following: + // + // $ cat /proc/meminfo + // MemTotal: 24677596 kB + // MemFree: 21244356 kB + // MemAvailable: 22085432 kB + // ... + // HugePages_Total: 0 + // HugePages_Free: 0 + // HugePages_Rsvd: 0 + // HugePages_Surp: 0 + // ... + // + // It's worth noting that /proc/meminfo returns exact information, not + // "theoretical" information. For instance, on the above system, I have + // 24GB of RAM but MemTotal is indicating only around 23GB. This is because + // MemTotal contains the exact amount of *usable* memory after accounting + // for the kernel's resident memory size and a few reserved bits. + // Please note GHW cares about the subset of lines shared between system-wide + // and per-NUMA-node meminfos. 
For more information, see: + // + // https://www.kernel.org/doc/Documentation/filesystems/proc.txt + r, err := os.Open(meminfoPath) + if err != nil { + return -1, err + } + defer util.SafeClose(r) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Split(line, ":") + key := parts[0] + if !strings.Contains(key, wantKey) { + continue + } + rawValue := parts[1] + inKb := strings.HasSuffix(rawValue, "kB") + value, err := strconv.Atoi(strings.TrimSpace(strings.TrimSuffix(rawValue, "kB"))) + if err != nil { + return -1, err + } + if inKb { + value = value * int(unitutil.KB) + } + return int64(value), nil + } + return -1, fmt.Errorf("failed to find '%s' entry in path %q", wantKey, meminfoPath) +} + +func readFileToInt64(filename string) (int64, error) { + data, err := os.ReadFile(filename) + if err != nil { + return -1, err + } + return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go new file mode 100644 index 00000000..6ce99e00 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package memory + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("mem.Info.load not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go new file mode 100644 index 00000000..c3a3945c --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go @@ -0,0 +1,72 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+//
+
+package memory
+
+import (
+	"github.com/StackExchange/wmi"
+
+	"github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+const wqlOperatingSystem = "SELECT TotalVisibleMemorySize FROM Win32_OperatingSystem"
+
+type win32OperatingSystem struct {
+	TotalVisibleMemorySize *uint64
+}
+
+const wqlPhysicalMemory = "SELECT BankLabel, Capacity, DataWidth, Description, DeviceLocator, Manufacturer, Model, Name, PartNumber, PositionInRow, SerialNumber, Speed, Tag, TotalWidth FROM Win32_PhysicalMemory"
+
+type win32PhysicalMemory struct {
+	BankLabel     *string
+	Capacity      *uint64
+	DataWidth     *uint16
+	Description   *string
+	DeviceLocator *string
+	Manufacturer  *string
+	Model         *string
+	Name          *string
+	PartNumber    *string
+	PositionInRow *uint32
+	SerialNumber  *string
+	Speed         *uint32
+	Tag           *string
+	TotalWidth    *uint16
+}
+
+func (i *Info) load() error {
+	// Getting info from WMI
+	var win32OSDescriptions []win32OperatingSystem
+	if err := wmi.Query(wqlOperatingSystem, &win32OSDescriptions); err != nil {
+		return err
+	}
+	var win32MemDescriptions []win32PhysicalMemory
+	if err := wmi.Query(wqlPhysicalMemory, &win32MemDescriptions); err != nil {
+		return err
+	}
+	// We calculate total physical memory size by summing the DIMM sizes
+	var totalPhysicalBytes uint64
+	i.Modules = make([]*Module, 0, len(win32MemDescriptions))
+	for _, description := range win32MemDescriptions {
+		totalPhysicalBytes += *description.Capacity
+		i.Modules = append(i.Modules, &Module{
+			Label:        *description.BankLabel,
+			Location:     *description.DeviceLocator,
+			SerialNumber: *description.SerialNumber,
+			SizeBytes:    int64(*description.Capacity),
+			Vendor:       *description.Manufacturer,
+		})
+	}
+	var totalUsableBytes uint64
+	for _, description := range win32OSDescriptions {
+		// TotalVisibleMemorySize is the amount of memory available for use by
+		// the operating system **in Kilobytes**
+		totalUsableBytes += *description.TotalVisibleMemorySize * uint64(unitutil.KB)
+	}
+	i.TotalUsableBytes = int64(totalUsableBytes)
+	i.TotalPhysicalBytes = int64(totalPhysicalBytes)
+	return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net.go b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
new file mode 100644
index 00000000..e26dab70
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
@@ -0,0 +1,140 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/marshal"
+	"github.com/jaypipes/ghw/pkg/option"
+)
+
+// NICCapability is a feature/capability of a Network Interface Controller
+// (NIC)
+type NICCapability struct {
+	// Name is the string name for the capability, e.g.
+	// "tcp-segmentation-offload"
+	Name string `json:"name"`
+	// IsEnabled is true if the capability is currently enabled on the NIC,
+	// false otherwise.
+	IsEnabled bool `json:"is_enabled"`
+	// CanEnable is true if the capability can be enabled on the NIC, false
+	// otherwise.
+	CanEnable bool `json:"can_enable"`
+}
+
+// NIC contains information about a single Network Interface Controller (NIC).
+type NIC struct {
+	// Name is the string identifier the system gave this NIC.
+	Name string `json:"name"`
+	// MACAddress is the Media Access Control (MAC) address of this NIC.
+	MACAddress string `json:"mac_address"`
+	// DEPRECATED: Please use MACAddress instead.
+	MacAddress string `json:"-"`
+	// IsVirtual is true if the NIC is entirely virtual/emulated, false
+	// otherwise.
+	IsVirtual bool `json:"is_virtual"`
+	// Capabilities is a slice of pointers to `NICCapability` structs
+	// describing a feature/capability of this NIC.
+	Capabilities []*NICCapability `json:"capabilities"`
+	// PCIAddress is a pointer to the PCI address for this NIC, or nil if there
+	// is no PCI address for this NIC.
+	PCIAddress *string `json:"pci_address,omitempty"`
+	// Speed is a string describing the link speed of this NIC, e.g. "1000Mb/s"
+	Speed string `json:"speed"`
+	// Duplex is a string indicating the current duplex setting of this NIC,
+	// e.g. "Full"
+	Duplex string `json:"duplex"`
+	// SupportedLinkModes is a slice of strings containing the supported link
+	// modes of this NIC, e.g. "10baseT/Half", "1000baseT/Full", etc.
+	SupportedLinkModes []string `json:"supported_link_modes,omitempty"`
+	// SupportedPorts is a slice of strings containing the supported physical
+	// ports on this NIC, e.g. "Twisted Pair"
+	SupportedPorts []string `json:"supported_ports,omitempty"`
+	// SupportedFECModes is a slice of strings containing the supported Forward
+	// Error Correction (FEC) modes for this NIC.
+	SupportedFECModes []string `json:"supported_fec_modes,omitempty"`
+	// AdvertisedLinkModes is a slice of strings containing the advertised
+	// (during auto-negotiation) link modes of this NIC, e.g. "10baseT/Half",
+	// "1000baseT/Full", etc.
+	AdvertisedLinkModes []string `json:"advertised_link_modes,omitempty"`
+	// AdvertisedFECModes is a slice of strings containing the advertised
+	// (during auto-negotiation) Forward Error Correction (FEC) modes for this
+	// NIC.
+	AdvertisedFECModes []string `json:"advertised_fec_modes,omitempty"`
+	// TODO(fromani): add other hw addresses (USB) when we support them
+}
+
+// String returns a short string with information about the NIC capability.
+func (nc *NICCapability) String() string {
+	return fmt.Sprintf(
+		"{Name:%s IsEnabled:%t CanEnable:%t}",
+		nc.Name,
+		nc.IsEnabled,
+		nc.CanEnable,
+	)
+}
+
+// String returns a short string with information about the NIC.
+func (n *NIC) String() string {
+	isVirtualStr := ""
+	if n.IsVirtual {
+		isVirtualStr = " (virtual)"
+	}
+	return fmt.Sprintf(
+		"%s%s",
+		n.Name,
+		isVirtualStr,
+	)
+}
+
+// Info describes all network interface controllers (NICs) in the host system.
+type Info struct {
+	ctx *context.Context
+	// NICs is a slice of pointers to `NIC` structs describing the network
+	// interface controllers (NICs) on the host system.
+	NICs []*NIC `json:"nics"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// network interface controllers (NICs) on the host system
+func New(opts ...*option.Option) (*Info, error) {
+	ctx := context.New(opts...)
+	info := &Info{ctx: ctx}
+	if err := ctx.Do(info.load); err != nil {
+		return nil, err
+	}
+	return info, nil
+}
+
+// String returns a short string with information about the networking on the
+// host system.
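+// (for example: "net (2 NICs)")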
+func (i *Info) String() string {
+	return fmt.Sprintf(
+		"net (%d NICs)",
+		len(i.NICs),
+	)
+}
+
+// simple private struct used to encapsulate net information in a
+// top-level "net" YAML/JSON map/object key
+type netPrinter struct {
+	Info *Info `json:"network"`
+}
+
+// YAMLString returns a string with the net information formatted as YAML
+// under a top-level "net:" key
+func (i *Info) YAMLString() string {
+	return marshal.SafeYAML(i.ctx, netPrinter{i})
+}
+
+// JSONString returns a string with the net information formatted as JSON
+// under a top-level "net:" key
+func (i *Info) JSONString(indent bool) string {
+	return marshal.SafeJSON(i.ctx, netPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
new file mode 100644
index 00000000..d7d7e8ca
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
@@ -0,0 +1,358 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+	"github.com/jaypipes/ghw/pkg/linuxpath"
+	"github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+	warnEthtoolNotInstalled = `ethtool not installed. Cannot grab NIC capabilities`
+)
+
+func (i *Info) load() error {
+	i.NICs = nics(i.ctx)
+	return nil
+}
+
+func nics(ctx *context.Context) []*NIC {
+	nics := make([]*NIC, 0)
+
+	paths := linuxpath.New(ctx)
+	files, err := os.ReadDir(paths.SysClassNet)
+	if err != nil {
+		return nics
+	}
+
+	etAvailable := ctx.EnableTools
+	if etAvailable {
+		if etInstalled := ethtoolInstalled(); !etInstalled {
+			ctx.Warn(warnEthtoolNotInstalled)
+			etAvailable = false
+		}
+	}
+
+	for _, file := range files {
+		filename := file.Name()
+		// Ignore loopback...
+		if filename == "lo" {
+			continue
+		}
+
+		netPath := filepath.Join(paths.SysClassNet, filename)
+		dest, _ := os.Readlink(netPath)
+		isVirtual := false
+		if strings.Contains(dest, "devices/virtual/net") {
+			isVirtual = true
+		}
+
+		nic := &NIC{
+			Name:      filename,
+			IsVirtual: isVirtual,
+		}
+
+		mac := netDeviceMacAddress(paths, filename)
+		nic.MacAddress = mac
+		nic.MACAddress = mac
+		if etAvailable {
+			nic.netDeviceParseEthtool(ctx, filename)
+		} else {
+			nic.Capabilities = []*NICCapability{}
+			// Sets NIC struct fields from data in SysFs
+			nic.setNicAttrSysFs(paths, filename)
+		}
+
+		nic.PCIAddress = netDevicePCIAddress(paths.SysClassNet, filename)
+
+		nics = append(nics, nic)
+	}
+	return nics
+}
+
+func netDeviceMacAddress(paths *linuxpath.Paths, dev string) string {
+	// Instead of using udevadm, we can get the device's MAC address by
+	// examining the /sys/class/net/$DEVICE/address file in sysfs. However,
+	// for devices that have addr_assign_type != 0, we return an empty string
+	// since the MAC address is random.
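+	// (addr_assign_type semantics in the kernel: 0 = NET_ADDR_PERM, a
+	// permanent address read from the hardware; 1 = NET_ADDR_RANDOM;
+	// 2 = NET_ADDR_STOLEN; 3 = NET_ADDR_SET via userspace.)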
+ aatPath := filepath.Join(paths.SysClassNet, dev, "addr_assign_type") + contents, err := os.ReadFile(aatPath) + if err != nil { + return "" + } + if strings.TrimSpace(string(contents)) != "0" { + return "" + } + addrPath := filepath.Join(paths.SysClassNet, dev, "address") + contents, err = os.ReadFile(addrPath) + if err != nil { + return "" + } + return strings.TrimSpace(string(contents)) +} + +func ethtoolInstalled() bool { + _, err := exec.LookPath("ethtool") + return err == nil +} + +func (n *NIC) netDeviceParseEthtool(ctx *context.Context, dev string) { + var out bytes.Buffer + path, _ := exec.LookPath("ethtool") + + // Get auto-negotiation and pause-frame-use capabilities from "ethtool" (with no options) + // Populate Speed, Duplex, SupportedLinkModes, SupportedPorts, SupportedFECModes, + // AdvertisedLinkModes, and AdvertisedFECModes attributes from "ethtool" output. + cmd := exec.Command(path, dev) + cmd.Stdout = &out + err := cmd.Run() + if err == nil { + m := parseNicAttrEthtool(&out) + n.Capabilities = append(n.Capabilities, autoNegCap(m)) + n.Capabilities = append(n.Capabilities, pauseFrameUseCap(m)) + + // Update NIC Attributes with ethtool output + n.Speed = strings.Join(m["Speed"], "") + n.Duplex = strings.Join(m["Duplex"], "") + n.SupportedLinkModes = m["Supported link modes"] + n.SupportedPorts = m["Supported ports"] + n.SupportedFECModes = m["Supported FEC modes"] + n.AdvertisedLinkModes = m["Advertised link modes"] + n.AdvertisedFECModes = m["Advertised FEC modes"] + } else { + msg := fmt.Sprintf("could not grab NIC link info for %s: %s", dev, err) + ctx.Warn(msg) + } + + // Get all other capabilities from "ethtool -k" + cmd = exec.Command(path, "-k", dev) + cmd.Stdout = &out + err = cmd.Run() + if err == nil { + // The out variable will now contain something that looks like the + // following. + // + // Features for enp58s0f1: + // rx-checksumming: on + // tx-checksumming: off + // tx-checksum-ipv4: off + // tx-checksum-ip-generic: off [fixed] + // tx-checksum-ipv6: off + // tx-checksum-fcoe-crc: off [fixed] + // tx-checksum-sctp: off [fixed] + // scatter-gather: off + // tx-scatter-gather: off + // tx-scatter-gather-fraglist: off [fixed] + // tcp-segmentation-offload: off + // tx-tcp-segmentation: off + // tx-tcp-ecn-segmentation: off [fixed] + // tx-tcp-mangleid-segmentation: off + // tx-tcp6-segmentation: off + // < snipped > + scanner := bufio.NewScanner(&out) + // Skip the first line... + scanner.Scan() + for scanner.Scan() { + line := strings.TrimPrefix(scanner.Text(), "\t") + n.Capabilities = append(n.Capabilities, netParseEthtoolFeature(line)) + } + + } else { + msg := fmt.Sprintf("could not grab NIC capabilities for %s: %s", dev, err) + ctx.Warn(msg) + } + +} + +// netParseEthtoolFeature parses a line from the ethtool -k output and returns +// a NICCapability. +// +// The supplied line will look like the following: +// +// tx-checksum-ip-generic: off [fixed] +// +// [fixed] indicates that the feature may not be turned on/off. Note: it makes +// no difference whether a privileged user runs `ethtool -k` when determining +// whether [fixed] appears for a feature. 
+func netParseEthtoolFeature(line string) *NICCapability {
+	parts := strings.Fields(line)
+	cap := strings.TrimSuffix(parts[0], ":")
+	enabled := parts[1] == "on"
+	fixed := len(parts) == 3 && parts[2] == "[fixed]"
+	return &NICCapability{
+		Name:      cap,
+		IsEnabled: enabled,
+		CanEnable: !fixed,
+	}
+}
+
+func netDevicePCIAddress(netDevDir, netDevName string) *string {
+	// what we do here is not that hard in the end: we need to navigate the sysfs
+	// up to the directory belonging to the device backing the network interface.
+	// we can make a few relatively safe assumptions, but the safest way is to
+	// follow the right links. And so we go.
+	// First of all, knowing the network device name we need to resolve the backing
+	// device path to its full sysfs path.
+	// say we start with netDevDir="/sys/class/net" and netDevName="enp0s31f6"
+	netPath := filepath.Join(netDevDir, netDevName)
+	dest, err := os.Readlink(netPath)
+	if err != nil {
+		// bail out with empty value
+		return nil
+	}
+	// now we have something like dest="../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+	// remember the path is relative to netDevDir="/sys/class/net"
+
+	netDev := filepath.Clean(filepath.Join(netDevDir, dest))
+	// so we clean "/sys/class/net/../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+	// leading to "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+	// still not there. We need to access the data of the pci device. So we jump into the path
+	// linked to the "device" pseudofile
+	dest, err = os.Readlink(filepath.Join(netDev, "device"))
+	if err != nil {
+		// bail out with empty value
+		return nil
+	}
+	// we expect something like="../../../0000:00:1f.6"
+
+	devPath := filepath.Clean(filepath.Join(netDev, dest))
+	// so we clean "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6/../../../0000:00:1f.6"
+	// leading to "/sys/devices/pci0000:00/0000:00:1f.6/"
+	// finally here!
+
+	// which bus is this device connected to?
+	dest, err = os.Readlink(filepath.Join(devPath, "subsystem"))
+	if err != nil {
+		// bail out with empty value
+		return nil
+	}
+	// ok, this is hacky, but since we need the last *two* path components and we know we
+	// are running on linux...
+	if !strings.HasSuffix(dest, "/bus/pci") {
+		// unsupported and unexpected bus!
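+		// (a USB-attached NIC, for instance, resolves to a ".../bus/usb"
+		// subsystem link here, and we report no PCI address for it)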
+ return nil + } + + pciAddr := filepath.Base(devPath) + return &pciAddr +} + +func (nic *NIC) setNicAttrSysFs(paths *linuxpath.Paths, dev string) { + // Get speed and duplex from /sys/class/net/$DEVICE/ directory + nic.Speed = readFile(filepath.Join(paths.SysClassNet, dev, "speed")) + nic.Duplex = readFile(filepath.Join(paths.SysClassNet, dev, "duplex")) +} + +func readFile(path string) string { + contents, err := os.ReadFile(path) + if err != nil { + return "" + } + return strings.TrimSpace(string(contents)) +} + +func autoNegCap(m map[string][]string) *NICCapability { + autoNegotiation := NICCapability{Name: "auto-negotiation", IsEnabled: false, CanEnable: false} + + an, anErr := util.ParseBool(strings.Join(m["Auto-negotiation"], "")) + aan, aanErr := util.ParseBool(strings.Join(m["Advertised auto-negotiation"], "")) + if an && aan && aanErr == nil && anErr == nil { + autoNegotiation.IsEnabled = true + } + + san, err := util.ParseBool(strings.Join(m["Supports auto-negotiation"], "")) + if san && err == nil { + autoNegotiation.CanEnable = true + } + + return &autoNegotiation +} + +func pauseFrameUseCap(m map[string][]string) *NICCapability { + pauseFrameUse := NICCapability{Name: "pause-frame-use", IsEnabled: false, CanEnable: false} + + apfu, err := util.ParseBool(strings.Join(m["Advertised pause frame use"], "")) + if apfu && err == nil { + pauseFrameUse.IsEnabled = true + } + + spfu, err := util.ParseBool(strings.Join(m["Supports pause frame use"], "")) + if spfu && err == nil { + pauseFrameUse.CanEnable = true + } + + return &pauseFrameUse +} + +func parseNicAttrEthtool(out *bytes.Buffer) map[string][]string { + // The out variable will now contain something that looks like the + // following. + // + //Settings for eth0: + // Supported ports: [ TP ] + // Supported link modes: 10baseT/Half 10baseT/Full + // 100baseT/Half 100baseT/Full + // 1000baseT/Full + // Supported pause frame use: No + // Supports auto-negotiation: Yes + // Supported FEC modes: Not reported + // Advertised link modes: 10baseT/Half 10baseT/Full + // 100baseT/Half 100baseT/Full + // 1000baseT/Full + // Advertised pause frame use: No + // Advertised auto-negotiation: Yes + // Advertised FEC modes: Not reported + // Speed: 1000Mb/s + // Duplex: Full + // Auto-negotiation: on + // Port: Twisted Pair + // PHYAD: 1 + // Transceiver: internal + // MDI-X: off (auto) + // Supports Wake-on: pumbg + // Wake-on: d + // Current message level: 0x00000007 (7) + // drv probe link + // Link detected: yes + + scanner := bufio.NewScanner(out) + // Skip the first line + scanner.Scan() + m := make(map[string][]string) + var name string + for scanner.Scan() { + var fields []string + if strings.Contains(scanner.Text(), ":") { + line := strings.Split(scanner.Text(), ":") + name = strings.TrimSpace(line[0]) + str := strings.Trim(strings.TrimSpace(line[1]), "[]") + switch str { + case + "Not reported", + "Unknown": + continue + } + fields = strings.Fields(str) + } else { + fields = strings.Fields(strings.Trim(strings.TrimSpace(scanner.Text()), "[]")) + } + + for _, f := range fields { + m[name] = append(m[name], strings.TrimSpace(f)) + } + } + + return m +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go new file mode 100644 index 00000000..c8dfa090 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. 
+// +// See the COPYING file in the root project directory for full text. +// + +package net + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("netFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go new file mode 100644 index 00000000..7efc0946 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go @@ -0,0 +1,74 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package net + +import ( + "strings" + + "github.com/StackExchange/wmi" +) + +const wqlNetworkAdapter = "SELECT Description, DeviceID, Index, InterfaceIndex, MACAddress, Manufacturer, Name, NetConnectionID, ProductName, ServiceName, PhysicalAdapter FROM Win32_NetworkAdapter" + +type win32NetworkAdapter struct { + Description *string + DeviceID *string + Index *uint32 + InterfaceIndex *uint32 + MACAddress *string + Manufacturer *string + Name *string + NetConnectionID *string + ProductName *string + ServiceName *string + PhysicalAdapter *bool +} + +func (i *Info) load() error { + // Getting info from WMI + var win32NetDescriptions []win32NetworkAdapter + if err := wmi.Query(wqlNetworkAdapter, &win32NetDescriptions); err != nil { + return err + } + + i.NICs = nics(win32NetDescriptions) + return nil +} + +func nics(win32NetDescriptions []win32NetworkAdapter) []*NIC { + // Converting into standard structures + nics := make([]*NIC, 0) + for _, nicDescription := range win32NetDescriptions { + nic := &NIC{ + Name: netDeviceName(nicDescription), + MacAddress: *nicDescription.MACAddress, + MACAddress: *nicDescription.MACAddress, + IsVirtual: netIsVirtual(nicDescription), + Capabilities: []*NICCapability{}, + } + nics = append(nics, nic) + } + + return nics +} + +func netDeviceName(description win32NetworkAdapter) string { + var name string + if strings.TrimSpace(*description.NetConnectionID) != "" { + name = *description.NetConnectionID + " - " + *description.Description + } else { + name = *description.Description + } + return name +} + +func netIsVirtual(description win32NetworkAdapter) bool { + if description.PhysicalAdapter == nil { + return false + } + + return !(*description.PhysicalAdapter) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/option/option.go b/vendor/github.com/jaypipes/ghw/pkg/option/option.go new file mode 100644 index 00000000..7ce14016 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/option/option.go @@ -0,0 +1,258 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package option + +import ( + "io" + "log" + "os" +) + +const ( + DefaultChroot = "/" +) + +const ( + envKeyChroot = "GHW_CHROOT" + envKeyDisableWarnings = "GHW_DISABLE_WARNINGS" + envKeyDisableTools = "GHW_DISABLE_TOOLS" + envKeySnapshotPath = "GHW_SNAPSHOT_PATH" + envKeySnapshotRoot = "GHW_SNAPSHOT_ROOT" + envKeySnapshotExclusive = "GHW_SNAPSHOT_EXCLUSIVE" + envKeySnapshotPreserve = "GHW_SNAPSHOT_PRESERVE" +) + +// Alerter emits warnings about undesirable but recoverable errors. +// We use a subset of a logger interface only to emit warnings, and +// `Warninger` sounded ugly. 
+type Alerter interface {
+	Printf(format string, v ...interface{})
+}
+
+var (
+	NullAlerter = log.New(io.Discard, "", 0)
+)
+
+// EnvOrDefaultAlerter returns the default instance ghw will use to emit
+// its warnings. ghw will emit warnings to stderr by default unless the
+// environment variable GHW_DISABLE_WARNINGS is set; in the latter case
+// all warnings will be suppressed.
+func EnvOrDefaultAlerter() Alerter {
+	var dest io.Writer
+	if _, exists := os.LookupEnv(envKeyDisableWarnings); exists {
+		dest = io.Discard
+	} else {
+		// default
+		dest = os.Stderr
+	}
+	return log.New(dest, "", 0)
+}
+
+// EnvOrDefaultChroot returns the value of the GHW_CHROOT environment variable
+// or the default value of "/" if not set
+func EnvOrDefaultChroot() string {
+	// Grab options from the environment by default
+	if val, exists := os.LookupEnv(envKeyChroot); exists {
+		return val
+	}
+	return DefaultChroot
+}
+
+// EnvOrDefaultSnapshotPath returns the value of the GHW_SNAPSHOT_PATH environment
+// variable or the default value of "" (disable snapshot consumption) if not set
+func EnvOrDefaultSnapshotPath() string {
+	if val, exists := os.LookupEnv(envKeySnapshotPath); exists {
+		return val
+	}
+	return "" // default is no snapshot
+}
+
+// EnvOrDefaultSnapshotRoot returns the value of the GHW_SNAPSHOT_ROOT environment
+// variable or the default value of "" (self-manage the snapshot unpack
+// directory, if relevant) if not set
+func EnvOrDefaultSnapshotRoot() string {
+	if val, exists := os.LookupEnv(envKeySnapshotRoot); exists {
+		return val
+	}
+	return "" // default is to self-manage the snapshot directory
+}
+
+// EnvOrDefaultSnapshotExclusive returns the value of the GHW_SNAPSHOT_EXCLUSIVE
+// environment variable or the default value of false if not set
+func EnvOrDefaultSnapshotExclusive() bool {
+	if _, exists := os.LookupEnv(envKeySnapshotExclusive); exists {
+		return true
+	}
+	return false
+}
+
+// EnvOrDefaultSnapshotPreserve returns the value of the GHW_SNAPSHOT_PRESERVE
+// environment variable or the default value of false if not set
+func EnvOrDefaultSnapshotPreserve() bool {
+	if _, exists := os.LookupEnv(envKeySnapshotPreserve); exists {
+		return true
+	}
+	return false
+}
+
+// EnvOrDefaultTools returns true if ghw should use external tools to augment
+// the data collected from sysfs. Most users want to do this most of the time,
+// so this is enabled by default. Users consuming snapshots may want to opt
+// out; they can set the GHW_DISABLE_TOOLS environment variable to any value
+// to make ghw skip calling external tools even if they are available.
+func EnvOrDefaultTools() bool {
+	if _, exists := os.LookupEnv(envKeyDisableTools); exists {
+		return false
+	}
+	return true
+}
+
+// Option is used to represent optionally-configured settings. Each field is a
+// pointer to some concrete value so that we can tell when something has been
+// set or left unset.
+type Option struct {
+	// To facilitate querying of sysfs filesystems that are bind-mounted to a
+	// non-default root mountpoint, we allow users to set the GHW_CHROOT
+	// environment variable to an alternate mountpoint. For instance, assume
+	// that the user of ghw is a Golang binary being executed from an
+	// application container that has certain host filesystems bind-mounted
+	// into the container at /host. The user would ensure the GHW_CHROOT
+	// environment variable is set to "/host" and ghw will build its paths
+	// from that location instead of /
+	Chroot *string
+
+	// Snapshot contains options for handling ghw snapshots
+	Snapshot *SnapshotOptions
+
+	// Alerter contains the target for ghw warnings
+	Alerter Alerter
+
+	// EnableTools optionally requests that ghw not call any external programs
+	// to learn about the hardware. The default is to use such tools if they
+	// are available.
+	EnableTools *bool
+
+	// PathOverrides optionally allows overriding the default paths ghw uses
+	// internally to learn about the system resources.
+	PathOverrides PathOverrides
+
+	// Context may contain a pointer to a `Context` struct that is constructed
+	// during a call to the `context.WithContext` function. Only used internally.
+	// This is an interface to get around recursive package import issues.
+	Context interface{}
+}
+
+// SnapshotOptions contains options for handling of ghw snapshots
+type SnapshotOptions struct {
+	// Path allows users to specify a snapshot (captured using ghw-snapshot) to be
+	// automatically consumed. Users need to supply the path of the snapshot, and
+	// ghw will take care of unpacking it on a temporary directory.
+	// Set the environment variable "GHW_SNAPSHOT_PRESERVE" to make ghw skip the
+	// cleanup stage and keep the unpacked snapshot in the temporary directory.
+	Path string
+	// Root is the directory on which the snapshot must be unpacked. This allows
+	// the users to manage their snapshot directory instead of ghw doing that on
+	// their behalf. Relevant only if SnapshotPath is given.
+	Root *string
+	// Exclusive tells ghw whether the given directory should be considered for
+	// ghw's exclusive use if the user provides a Root. If the flag is set, ghw
+	// will unpack the snapshot in the given Root only if the directory is
+	// empty; otherwise any existing content is left untouched and the unpack
+	// stage exits silently. As an additional side effect, supplying both this
+	// option and Root makes each context try to unpack the snapshot only once.
+	Exclusive bool
+}
+
+// WithChroot allows overriding the root directory ghw uses.
+func WithChroot(dir string) *Option {
+	return &Option{Chroot: &dir}
+}
+
+// WithSnapshot sets snapshot-processing options for a ghw run
+func WithSnapshot(opts SnapshotOptions) *Option {
+	return &Option{
+		Snapshot: &opts,
+	}
+}
+
+// WithAlerter sets alerting options for ghw
+func WithAlerter(alerter Alerter) *Option {
+	return &Option{
+		Alerter: alerter,
+	}
+}
+
+// WithNullAlerter sets no-op alerting options for ghw
+func WithNullAlerter() *Option {
+	return &Option{
+		Alerter: NullAlerter,
+	}
+}
+
+// WithDisableTools prevents ghw from calling external tools to discover
+// hardware capabilities.
+func WithDisableTools() *Option {
+	false_ := false
+	return &Option{EnableTools: &false_}
+}
+
+// PathOverrides is a map, keyed by the string name of a mount path, of override paths
+type PathOverrides map[string]string
+
+// WithPathOverrides supplies path-specific overrides for the context
+func WithPathOverrides(overrides PathOverrides) *Option {
+	return &Option{
+		PathOverrides: overrides,
+	}
+}
+
+// There is intentionally no Option related to GHW_SNAPSHOT_PRESERVE because we
+// see that as a debug/troubleshooting aid more than something users want to do
+// regularly. Hence we allow that only via the environment variable for the
+// time being.
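+
+// As a rough usage sketch (hypothetical consumer code; memory.New and
+// YAMLString are defined in the vendored memory package above):
+//
+//	// read sysfs under /host and suppress ghw warnings
+//	mem, err := memory.New(option.WithChroot("/host"), option.WithNullAlerter())
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(mem.YAMLString())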
+ +// Merge accepts one or more Options and merges them together, returning the +// merged Option +func Merge(opts ...*Option) *Option { + merged := &Option{} + for _, opt := range opts { + if opt.Chroot != nil { + merged.Chroot = opt.Chroot + } + if opt.Snapshot != nil { + merged.Snapshot = opt.Snapshot + } + if opt.Alerter != nil { + merged.Alerter = opt.Alerter + } + if opt.EnableTools != nil { + merged.EnableTools = opt.EnableTools + } + // intentionally only programmatically + if opt.PathOverrides != nil { + merged.PathOverrides = opt.PathOverrides + } + if opt.Context != nil { + merged.Context = opt.Context + } + } + // Set the default value if missing from mergeOpts + if merged.Chroot == nil { + chroot := EnvOrDefaultChroot() + merged.Chroot = &chroot + } + if merged.Alerter == nil { + merged.Alerter = EnvOrDefaultAlerter() + } + if merged.Snapshot == nil { + snapRoot := EnvOrDefaultSnapshotRoot() + merged.Snapshot = &SnapshotOptions{ + Path: EnvOrDefaultSnapshotPath(), + Root: &snapRoot, + Exclusive: EnvOrDefaultSnapshotExclusive(), + } + } + if merged.EnableTools == nil { + enabled := EnvOrDefaultTools() + merged.EnableTools = &enabled + } + return merged +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go new file mode 100644 index 00000000..660238c2 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go @@ -0,0 +1,54 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package address + +import ( + "regexp" + "strings" +) + +var ( + regexAddress *regexp.Regexp = regexp.MustCompile( + `^((1?[0-9a-f]{0,4}):)?([0-9a-f]{2}):([0-9a-f]{2})\.([0-9a-f]{1})$`, + ) +) + +// Address contains the components of a PCI Address +type Address struct { + Domain string + Bus string + Device string + Function string +} + +// String() returns the canonical [D]BDF representation of this Address +func (addr *Address) String() string { + return addr.Domain + ":" + addr.Bus + ":" + addr.Device + "." + addr.Function +} + +// FromString returns [Address] from an address string in either +// $BUS:$DEVICE.$FUNCTION (BDF) format or a full PCI address that +// includes the domain: $DOMAIN:$BUS:$DEVICE.$FUNCTION. +// +// If the address string isn't a valid PCI address, then nil is returned. +func FromString(address string) *Address { + addrLowered := strings.ToLower(address) + matches := regexAddress.FindStringSubmatch(addrLowered) + if len(matches) == 6 { + dom := "0000" + if matches[1] != "" { + dom = matches[2] + } + return &Address{ + Domain: dom, + Bus: matches[3], + Device: matches[4], + Function: matches[5], + } + } + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go new file mode 100644 index 00000000..55cc1eac --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go @@ -0,0 +1,193 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+// + +package pci + +import ( + "encoding/json" + "fmt" + + "github.com/jaypipes/pcidb" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/topology" + "github.com/jaypipes/ghw/pkg/util" +) + +type Device struct { + // The PCI address of the device + Address string `json:"address"` + Vendor *pcidb.Vendor `json:"vendor"` + Product *pcidb.Product `json:"product"` + Revision string `json:"revision"` + Subsystem *pcidb.Product `json:"subsystem"` + // optional subvendor/sub-device information + Class *pcidb.Class `json:"class"` + // optional sub-class for the device + Subclass *pcidb.Subclass `json:"subclass"` + // optional programming interface + ProgrammingInterface *pcidb.ProgrammingInterface `json:"programming_interface"` + // Topology node that the PCI device is affined to. Will be nil if the + // architecture is not NUMA. + Node *topology.Node `json:"node,omitempty"` + Driver string `json:"driver"` +} + +type devIdent struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type devMarshallable struct { + Driver string `json:"driver"` + Address string `json:"address"` + Vendor devIdent `json:"vendor"` + Product devIdent `json:"product"` + Revision string `json:"revision"` + Subsystem devIdent `json:"subsystem"` + Class devIdent `json:"class"` + Subclass devIdent `json:"subclass"` + Interface devIdent `json:"programming_interface"` +} + +// NOTE(jaypipes) Device has a custom JSON marshaller because we don't want +// to serialize the entire PCIDB information for the Vendor (which includes all +// of the vendor's products, etc). Instead, we simply serialize the ID and +// human-readable name of the vendor, product, class, etc. +func (d *Device) MarshalJSON() ([]byte, error) { + dm := devMarshallable{ + Driver: d.Driver, + Address: d.Address, + Vendor: devIdent{ + ID: d.Vendor.ID, + Name: d.Vendor.Name, + }, + Product: devIdent{ + ID: d.Product.ID, + Name: d.Product.Name, + }, + Revision: d.Revision, + Subsystem: devIdent{ + ID: d.Subsystem.ID, + Name: d.Subsystem.Name, + }, + Class: devIdent{ + ID: d.Class.ID, + Name: d.Class.Name, + }, + Subclass: devIdent{ + ID: d.Subclass.ID, + Name: d.Subclass.Name, + }, + Interface: devIdent{ + ID: d.ProgrammingInterface.ID, + Name: d.ProgrammingInterface.Name, + }, + } + return json.Marshal(dm) +} + +func (d *Device) String() string { + vendorName := util.UNKNOWN + if d.Vendor != nil { + vendorName = d.Vendor.Name + } + productName := util.UNKNOWN + if d.Product != nil { + productName = d.Product.Name + } + className := util.UNKNOWN + if d.Class != nil { + className = d.Class.Name + } + return fmt.Sprintf( + "%s -> driver: '%s' class: '%s' vendor: '%s' product: '%s'", + d.Address, + d.Driver, + className, + vendorName, + productName, + ) +} + +type Info struct { + db *pcidb.PCIDB + arch topology.Architecture + ctx *context.Context + // All PCI devices on the host system + Devices []*Device +} + +func (i *Info) String() string { + return fmt.Sprintf("PCI (%d devices)", len(i.Devices)) +} + +// New returns a pointer to an Info struct that contains information about the +// PCI devices on the host system +func New(opts ...*option.Option) (*Info, error) { + merged := option.Merge(opts...) 
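+	// (option.Merge falls back to the GHW_* environment variables for any
+	// unset fields, so calling New() with no options also works.)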
+ ctx := context.New(merged) + // by default we don't report NUMA information; + // we will only if are sure we are running on NUMA architecture + info := &Info{ + arch: topology.ArchitectureSMP, + ctx: ctx, + } + + // we do this trick because we need to make sure ctx.Setup() gets + // a chance to run before any subordinate package is created reusing + // our context. + loadDetectingTopology := func() error { + topo, err := topology.New(context.WithContext(ctx)) + if err == nil { + info.arch = topo.Architecture + } else { + ctx.Warn("error detecting system topology: %v", err) + } + return info.load() + } + + var err error + if context.Exists(merged) { + err = loadDetectingTopology() + } else { + err = ctx.Do(loadDetectingTopology) + } + if err != nil { + return nil, err + } + return info, nil +} + +// lookupDevice gets a device from cached data +func (info *Info) lookupDevice(address string) *Device { + for _, dev := range info.Devices { + if dev.Address == address { + return dev + } + } + return nil +} + +// simple private struct used to encapsulate PCI information in a top-level +// "pci" YAML/JSON map/object key +type pciPrinter struct { + Info *Info `json:"pci"` +} + +// YAMLString returns a string with the PCI information formatted as YAML +// under a top-level "pci:" key +func (i *Info) YAMLString() string { + return marshal.SafeYAML(i.ctx, pciPrinter{i}) +} + +// JSONString returns a string with the PCI information formatted as JSON +// under a top-level "pci:" key +func (i *Info) JSONString(indent bool) string { + return marshal.SafeJSON(i.ctx, pciPrinter{i}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go new file mode 100644 index 00000000..a9616687 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go @@ -0,0 +1,412 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package pci + +import ( + "os" + "path/filepath" + "strings" + + "github.com/jaypipes/pcidb" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/linuxpath" + "github.com/jaypipes/ghw/pkg/option" + pciaddr "github.com/jaypipes/ghw/pkg/pci/address" + "github.com/jaypipes/ghw/pkg/topology" + "github.com/jaypipes/ghw/pkg/util" +) + +const ( + // found running `wc` against real linux systems + modAliasExpectedLength = 54 +) + +func (i *Info) load() error { + // when consuming snapshots - most notably, but not only, in tests, + // the context pkg forces the chroot value to the unpacked snapshot root. + // This is intentional, intentionally transparent and ghw is prepared to handle this case. + // However, `pcidb` is not. It doesn't know about ghw snaphots, nor it should. + // so we need to complicate things a bit. If the user explicitely supplied + // a chroot option, then we should honor it all across the stack, and passing down + // the chroot to pcidb is the right thing to do. If, however, the chroot was + // implcitely set by snapshot support, then this must be consumed by ghw only. + // In this case we should NOT pass it down to pcidb. 
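To make the chroot-vs-snapshot distinction above concrete, here is a hedged usage sketch. It assumes the `WithChroot` and `WithSnapshot` constructors from the vendored option package (consistent with the `Option` fields merged earlier); the paths are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/option"
	"github.com/jaypipes/ghw/pkg/pci"
)

func main() {
	// Explicit chroot: honored across the whole stack, including pcidb.
	fromChroot, err := pci.New(option.WithChroot("/host"))
	if err == nil {
		fmt.Println(fromChroot)
	}

	// Snapshot: the implicit chroot stays internal to ghw and is NOT
	// passed down to pcidb, per the comment above.
	fromSnap, err := pci.New(option.WithSnapshot(option.SnapshotOptions{
		Path: "/tmp/my-ghw-snapshot.tgz", // hypothetical snapshot file
	}))
	if err == nil {
		fmt.Println(fromSnap)
	}
}
```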
+ chroot := i.ctx.Chroot + if i.ctx.SnapshotPath != "" { + chroot = option.DefaultChroot + } + db, err := pcidb.New(pcidb.WithChroot(chroot)) + if err != nil { + return err + } + i.db = db + i.Devices = i.getDevices() + return nil +} + +func getDeviceModaliasPath(ctx *context.Context, pciAddr *pciaddr.Address) string { + paths := linuxpath.New(ctx) + return filepath.Join( + paths.SysBusPciDevices, + pciAddr.String(), + "modalias", + ) +} + +func getDeviceRevision(ctx *context.Context, pciAddr *pciaddr.Address) string { + paths := linuxpath.New(ctx) + revisionPath := filepath.Join( + paths.SysBusPciDevices, + pciAddr.String(), + "revision", + ) + + if _, err := os.Stat(revisionPath); err != nil { + return "" + } + revision, err := os.ReadFile(revisionPath) + if err != nil { + return "" + } + return strings.TrimSpace(string(revision)) +} + +func getDeviceNUMANode(ctx *context.Context, pciAddr *pciaddr.Address) *topology.Node { + paths := linuxpath.New(ctx) + numaNodePath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "numa_node") + + if _, err := os.Stat(numaNodePath); err != nil { + return nil + } + + nodeIdx := util.SafeIntFromFile(ctx, numaNodePath) + if nodeIdx == -1 { + return nil + } + + return &topology.Node{ + ID: nodeIdx, + } +} + +func getDeviceDriver(ctx *context.Context, pciAddr *pciaddr.Address) string { + paths := linuxpath.New(ctx) + driverPath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "driver") + + if _, err := os.Stat(driverPath); err != nil { + return "" + } + + dest, err := os.Readlink(driverPath) + if err != nil { + return "" + } + return filepath.Base(dest) +} + +type deviceModaliasInfo struct { + vendorID string + productID string + subproductID string + subvendorID string + classID string + subclassID string + progIfaceID string +} + +func parseModaliasFile(fp string) *deviceModaliasInfo { + if _, err := os.Stat(fp); err != nil { + return nil + } + data, err := os.ReadFile(fp) + if err != nil { + return nil + } + + return parseModaliasData(string(data)) +} + +func parseModaliasData(data string) *deviceModaliasInfo { + // extra sanity check to avoid segfaults. We actually expect + // the data to be exactly long `modAliasExpectedlength`, but + // we will happily ignore any extra data we don't know how to + // handle. + if len(data) < modAliasExpectedLength { + return nil + } + // The modalias file is an encoded file that looks like this: + // + // $ cat /sys/devices/pci0000\:00/0000\:00\:03.0/0000\:03\:00.0/modalias + // pci:v000010DEd00001C82sv00001043sd00008613bc03sc00i00 + // + // It is interpreted like so: + // + // pci: -- ignore + // v000010DE -- PCI vendor ID + // d00001C82 -- PCI device ID (the product/model ID) + // sv00001043 -- PCI subsystem vendor ID + // sd00008613 -- PCI subsystem device ID (subdevice product/model ID) + // bc03 -- PCI base class + // sc00 -- PCI subclass + // i00 -- programming interface + vendorID := strings.ToLower(data[9:13]) + productID := strings.ToLower(data[18:22]) + subvendorID := strings.ToLower(data[28:32]) + subproductID := strings.ToLower(data[38:42]) + classID := strings.ToLower(data[44:46]) + subclassID := strings.ToLower(data[48:50]) + progIfaceID := strings.ToLower(data[51:53]) + return &deviceModaliasInfo{ + vendorID: vendorID, + productID: productID, + subproductID: subproductID, + subvendorID: subvendorID, + classID: classID, + subclassID: subclassID, + progIfaceID: progIfaceID, + } +} + +// Returns a pointer to a pcidb.Vendor struct matching the supplied vendor +// ID string. 
If no such vendor ID string could be found, returns the +// pcidb.Vendor struct populated with "unknown" vendor Name attribute and +// empty Products attribute. +func findPCIVendor(info *Info, vendorID string) *pcidb.Vendor { + vendor := info.db.Vendors[vendorID] + if vendor == nil { + return &pcidb.Vendor{ + ID: vendorID, + Name: util.UNKNOWN, + Products: []*pcidb.Product{}, + } + } + return vendor +} + +// Returns a pointer to a pcidb.Product struct matching the supplied vendor +// and product ID strings. If no such product could be found, returns the +// pcidb.Product struct populated with "unknown" product Name attribute and +// empty Subsystems attribute. +func findPCIProduct( + info *Info, + vendorID string, + productID string, +) *pcidb.Product { + product := info.db.Products[vendorID+productID] + if product == nil { + return &pcidb.Product{ + ID: productID, + Name: util.UNKNOWN, + Subsystems: []*pcidb.Product{}, + } + } + return product +} + +// Returns a pointer to a pcidb.Product struct matching the supplied vendor, +// product, subvendor and subproduct ID strings. If no such product could be +// found, returns the pcidb.Product struct populated with "unknown" product +// Name attribute and empty Subsystems attribute. +func findPCISubsystem( + info *Info, + vendorID string, + productID string, + subvendorID string, + subproductID string, +) *pcidb.Product { + product := info.db.Products[vendorID+productID] + subvendor := info.db.Vendors[subvendorID] + if subvendor != nil && product != nil { + for _, p := range product.Subsystems { + if p.ID == subproductID { + return p + } + } + } + return &pcidb.Product{ + VendorID: subvendorID, + ID: subproductID, + Name: util.UNKNOWN, + } +} + +// Returns a pointer to a pcidb.Class struct matching the supplied class ID +// string. If no such class ID string could be found, returns the +// pcidb.Class struct populated with "unknown" class Name attribute and +// empty Subclasses attribute. +func findPCIClass(info *Info, classID string) *pcidb.Class { + class := info.db.Classes[classID] + if class == nil { + return &pcidb.Class{ + ID: classID, + Name: util.UNKNOWN, + Subclasses: []*pcidb.Subclass{}, + } + } + return class +} + +// Returns a pointer to a pcidb.Subclass struct matching the supplied class +// and subclass ID strings. If no such subclass could be found, returns the +// pcidb.Subclass struct populated with "unknown" subclass Name attribute +// and empty ProgrammingInterfaces attribute. +func findPCISubclass( + info *Info, + classID string, + subclassID string, +) *pcidb.Subclass { + class := info.db.Classes[classID] + if class != nil { + for _, sc := range class.Subclasses { + if sc.ID == subclassID { + return sc + } + } + } + return &pcidb.Subclass{ + ID: subclassID, + Name: util.UNKNOWN, + ProgrammingInterfaces: []*pcidb.ProgrammingInterface{}, + } +} + +// Returns a pointer to a pcidb.ProgrammingInterface struct matching the +// supplied class, subclass and programming interface ID strings. 
If no such +// programming interface could be found, returns the +// pcidb.ProgrammingInterface struct populated with "unknown" Name attribute +func findPCIProgrammingInterface( + info *Info, + classID string, + subclassID string, + progIfaceID string, +) *pcidb.ProgrammingInterface { + subclass := findPCISubclass(info, classID, subclassID) + for _, pi := range subclass.ProgrammingInterfaces { + if pi.ID == progIfaceID { + return pi + } + } + return &pcidb.ProgrammingInterface{ + ID: progIfaceID, + Name: util.UNKNOWN, + } +} + +// GetDevice returns a pointer to a Device struct that describes the PCI +// device at the requested address. If no such device could be found, returns nil. +func (info *Info) GetDevice(address string) *Device { + // check cached data first + if dev := info.lookupDevice(address); dev != nil { + return dev + } + + pciAddr := pciaddr.FromString(address) + if pciAddr == nil { + info.ctx.Warn("error parsing the pci address %q", address) + return nil + } + + // no cached data, let's get the information from system. + fp := getDeviceModaliasPath(info.ctx, pciAddr) + if fp == "" { + info.ctx.Warn("error finding modalias info for device %q", address) + return nil + } + + modaliasInfo := parseModaliasFile(fp) + if modaliasInfo == nil { + info.ctx.Warn("error parsing modalias info for device %q", address) + return nil + } + + device := info.getDeviceFromModaliasInfo(address, modaliasInfo) + device.Revision = getDeviceRevision(info.ctx, pciAddr) + if info.arch == topology.ArchitectureNUMA { + device.Node = getDeviceNUMANode(info.ctx, pciAddr) + } + device.Driver = getDeviceDriver(info.ctx, pciAddr) + return device +} + +// ParseDevice returns a pointer to a Device given its describing data. +// The PCI device obtained this way may not exist in the system; +// use GetDevice to get a *Device which is found in the system +func (info *Info) ParseDevice(address, modalias string) *Device { + modaliasInfo := parseModaliasData(modalias) + if modaliasInfo == nil { + return nil + } + return info.getDeviceFromModaliasInfo(address, modaliasInfo) +} + +func (info *Info) getDeviceFromModaliasInfo( + address string, + modaliasInfo *deviceModaliasInfo, +) *Device { + vendor := findPCIVendor(info, modaliasInfo.vendorID) + product := findPCIProduct( + info, + modaliasInfo.vendorID, + modaliasInfo.productID, + ) + subsystem := findPCISubsystem( + info, + modaliasInfo.vendorID, + modaliasInfo.productID, + modaliasInfo.subvendorID, + modaliasInfo.subproductID, + ) + class := findPCIClass(info, modaliasInfo.classID) + subclass := findPCISubclass( + info, + modaliasInfo.classID, + modaliasInfo.subclassID, + ) + progIface := findPCIProgrammingInterface( + info, + modaliasInfo.classID, + modaliasInfo.subclassID, + modaliasInfo.progIfaceID, + ) + + return &Device{ + Address: address, + Vendor: vendor, + Subsystem: subsystem, + Product: product, + Class: class, + Subclass: subclass, + ProgrammingInterface: progIface, + } +} + +// getDevices returns a list of pointers to Device structs present on the +// host system +func (info *Info) getDevices() []*Device { + paths := linuxpath.New(info.ctx) + devs := make([]*Device, 0) + // We scan the /sys/bus/pci/devices directory which contains a collection + // of symlinks. The names of the symlinks are all the known PCI addresses + // for the host. For each address, we grab a *Device matching the + // address and append to the returned array. 
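A quick usage aside before the device scan below, covering the `GetDevice` and `ParseDevice` entry points defined above. A minimal sketch (Linux only, since the loader reads sysfs; address and modalias values are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/pci"
)

func main() {
	info, err := pci.New()
	if err != nil {
		panic(err)
	}
	// Look up a device that exists on this host (cached or from sysfs).
	if dev := info.GetDevice("0000:03:00.0"); dev != nil {
		fmt.Println(dev) // address -> driver/class/vendor/product summary
	}
	// ParseDevice does not require the device to exist on this host.
	// Trailing newline included, as read from sysfs: the parser expects
	// at least 54 bytes of modalias data.
	dev := info.ParseDevice("0000:07:00.0",
		"pci:v000010DEd00001C82sv00001043sd00008613bc03sc00i00\n")
	if dev != nil {
		fmt.Println(dev.Vendor.Name, dev.Product.Name)
	}
}
```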
+ links, err := os.ReadDir(paths.SysBusPciDevices) + if err != nil { + info.ctx.Warn("failed to read /sys/bus/pci/devices") + return nil + } + var dev *Device + for _, link := range links { + addr := link.Name() + dev = info.GetDevice(addr) + if dev == nil { + info.ctx.Warn("failed to get device information for PCI address %s", addr) + } else { + devs = append(devs, dev) + } + } + return devs +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go new file mode 100644 index 00000000..9ebb396d --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go @@ -0,0 +1,32 @@ +//go:build !linux +// +build !linux + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package pci + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("pciFillInfo not implemented on " + runtime.GOOS) +} + +// GetDevice returns a pointer to a Device struct that describes the PCI +// device at the requested address. If no such device could be found, returns +// nil +func (info *Info) GetDevice(address string) *Device { + return nil +} + +// ListDevices returns a list of pointers to Device structs present on the +// host system +func (info *Info) ListDevices() []*Device { + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product.go b/vendor/github.com/jaypipes/ghw/pkg/product/product.go new file mode 100644 index 00000000..83d6541d --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/product/product.go @@ -0,0 +1,96 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package product + +import ( + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/option" + "github.com/jaypipes/ghw/pkg/util" +) + +// Info defines product information +type Info struct { + ctx *context.Context + Family string `json:"family"` + Name string `json:"name"` + Vendor string `json:"vendor"` + SerialNumber string `json:"serial_number"` + UUID string `json:"uuid"` + SKU string `json:"sku"` + Version string `json:"version"` +} + +func (i *Info) String() string { + familyStr := "" + if i.Family != "" { + familyStr = " family=" + i.Family + } + nameStr := "" + if i.Name != "" { + nameStr = " name=" + i.Name + } + vendorStr := "" + if i.Vendor != "" { + vendorStr = " vendor=" + i.Vendor + } + serialStr := "" + if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN { + serialStr = " serial=" + i.SerialNumber + } + uuidStr := "" + if i.UUID != "" && i.UUID != util.UNKNOWN { + uuidStr = " uuid=" + i.UUID + } + skuStr := "" + if i.SKU != "" { + skuStr = " sku=" + i.SKU + } + versionStr := "" + if i.Version != "" { + versionStr = " version=" + i.Version + } + + return "product" + util.ConcatStrings( + familyStr, + nameStr, + vendorStr, + serialStr, + uuidStr, + skuStr, + versionStr, + ) +} + +// New returns a pointer to a Info struct containing information +// about the host's product +func New(opts ...*option.Option) (*Info, error) { + ctx := context.New(opts...) 
+ info := &Info{ctx: ctx} + if err := ctx.Do(info.load); err != nil { + return nil, err + } + return info, nil +} + +// simple private struct used to encapsulate product information in a top-level +// "product" YAML/JSON map/object key +type productPrinter struct { + Info *Info `json:"product"` +} + +// YAMLString returns a string with the product information formatted as YAML +// under a top-level "dmi:" key +func (info *Info) YAMLString() string { + return marshal.SafeYAML(info.ctx, productPrinter{info}) +} + +// JSONString returns a string with the product information formatted as JSON +// under a top-level "product:" key +func (info *Info) JSONString(indent bool) string { + return marshal.SafeJSON(info.ctx, productPrinter{info}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go new file mode 100644 index 00000000..36b6b447 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go @@ -0,0 +1,23 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package product + +import ( + "github.com/jaypipes/ghw/pkg/linuxdmi" +) + +func (i *Info) load() error { + + i.Family = linuxdmi.Item(i.ctx, "product_family") + i.Name = linuxdmi.Item(i.ctx, "product_name") + i.Vendor = linuxdmi.Item(i.ctx, "sys_vendor") + i.SerialNumber = linuxdmi.Item(i.ctx, "product_serial") + i.UUID = linuxdmi.Item(i.ctx, "product_uuid") + i.SKU = linuxdmi.Item(i.ctx, "product_sku") + i.Version = linuxdmi.Item(i.ctx, "product_version") + + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go new file mode 100644 index 00000000..8fc9724f --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package product + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("productFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go new file mode 100644 index 00000000..c919cb0f --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go @@ -0,0 +1,45 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package product + +import ( + "github.com/StackExchange/wmi" + + "github.com/jaypipes/ghw/pkg/util" +) + +const wqlProduct = "SELECT Caption, Description, IdentifyingNumber, Name, SKUNumber, Vendor, Version, UUID FROM Win32_ComputerSystemProduct" + +type win32Product struct { + Caption *string + Description *string + IdentifyingNumber *string + Name *string + SKUNumber *string + Vendor *string + Version *string + UUID *string +} + +func (i *Info) load() error { + // Getting data from WMI + var win32ProductDescriptions []win32Product + // Assuming the first product is the host... 
+	if err := wmi.Query(wqlProduct, &win32ProductDescriptions); err != nil {
+		return err
+	}
+	if len(win32ProductDescriptions) > 0 {
+		i.Family = util.UNKNOWN
+		i.Name = *win32ProductDescriptions[0].Name
+		i.Vendor = *win32ProductDescriptions[0].Vendor
+		i.SerialNumber = *win32ProductDescriptions[0].IdentifyingNumber
+		i.UUID = *win32ProductDescriptions[0].UUID
+		i.SKU = *win32ProductDescriptions[0].SKUNumber
+		i.Version = *win32ProductDescriptions[0].Version
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
new file mode 100644
index 00000000..020e7e67
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
@@ -0,0 +1,198 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// Attempting to tar up pseudofiles like /proc/cpuinfo is an exercise in
+// futility. Notably, the pseudofiles, when read by syscalls, do not return the
+// number of bytes read. This causes the tar writer to write zero-length files.
+//
+// Instead, it is necessary to build a directory structure in a tmpdir and
+// create actual files with copies of the pseudofile contents.
+
+// CloneTreeInto copies all the pseudofiles that ghw will consume into the root
+// `scratchDir`, preserving the hierarchy.
+func CloneTreeInto(scratchDir string) error {
+	err := setupScratchDir(scratchDir)
+	if err != nil {
+		return err
+	}
+	fileSpecs := ExpectedCloneContent()
+	return CopyFilesInto(fileSpecs, scratchDir, nil)
+}
+
+// ExpectedCloneContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about.
+// The intended usage of this function is to validate a clone tree, checking that the
+// content matches the expectations.
+// Beware: the content is host-specific, because the content pertaining to some
+// subsystems, most notably PCI, is host-specific and unpredictable.
+func ExpectedCloneContent() []string {
+	fileSpecs := ExpectedCloneStaticContent()
+	fileSpecs = append(fileSpecs, ExpectedCloneNetContent()...)
+	fileSpecs = append(fileSpecs, ExpectedClonePCIContent()...)
+	fileSpecs = append(fileSpecs, ExpectedCloneGPUContent()...)
+	return fileSpecs
+}
+
+// ValidateClonedTree checks the content of a cloned tree, whose root is `clonedDir`,
+// against a slice of glob specs which must be included in the cloned tree.
+// It is not wrong (and this function doesn't enforce otherwise) for the cloned tree
+// to include more files than necessary; ghw will just ignore the files it doesn't
+// care about.
+// Returns a slice of glob patterns expected (given) but not found in the cloned tree,
+// and the error encountered during the validation (if any).
+func ValidateClonedTree(fileSpecs []string, clonedDir string) ([]string, error) {
+	missing := []string{}
+	for _, fileSpec := range fileSpecs {
+		matches, err := filepath.Glob(filepath.Join(clonedDir, fileSpec))
+		if err != nil {
+			return missing, err
+		}
+		if len(matches) == 0 {
+			missing = append(missing, fileSpec)
+		}
+	}
+	return missing, nil
+}
+
+// CopyFileOptions allows fine-tuning of the behaviour of the CopyFilesInto function
+type CopyFileOptions struct {
+	// IsSymlinkFn controls the behaviour when handling a symlink.
+ // If this hook returns true, the source file is treated as symlink: the cloned + // tree will thus contain a symlink, with its path adjusted to match the relative + // path inside the cloned tree. If return false, the symlink will be deferred. + // The easiest use case of this hook is if you want to avoid symlinks in your cloned + // tree (having duplicated content). In this case you can just add a function + // which always return false. + IsSymlinkFn func(path string, info os.FileInfo) bool + // ShouldCreateDirFn allows to control if empty directories listed as clone + // content should be created or not. When creating snapshots, empty directories + // are most often useless (but also harmless). Because of this, directories are only + // created as side effect of copying the files which are inside, and thus directories + // are never empty. The only notable exception are device driver on linux: in this + // case, for a number of technical/historical reasons, we care about the directory + // name, but not about the files which are inside. + // Hence, this is the only case on which ghw clones empty directories. + ShouldCreateDirFn func(path string, info os.FileInfo) bool +} + +// CopyFilesInto copies all the given glob files specs in the given `destDir` directory, +// preserving the directory structure. This means you can provide a deeply nested filespec +// like +// - /some/deeply/nested/file* +// and you DO NOT need to build the tree incrementally like +// - /some/ +// - /some/deeply/ +// ... +// all glob patterns supported in `filepath.Glob` are supported. +func CopyFilesInto(fileSpecs []string, destDir string, opts *CopyFileOptions) error { + if opts == nil { + opts = &CopyFileOptions{ + IsSymlinkFn: isSymlink, + ShouldCreateDirFn: isDriversDir, + } + } + for _, fileSpec := range fileSpecs { + trace("copying spec: %q\n", fileSpec) + matches, err := filepath.Glob(fileSpec) + if err != nil { + return err + } + if err := copyFileTreeInto(matches, destDir, opts); err != nil { + return err + } + } + return nil +} + +func copyFileTreeInto(paths []string, destDir string, opts *CopyFileOptions) error { + for _, path := range paths { + trace(" copying path: %q\n", path) + baseDir := filepath.Dir(path) + if err := os.MkdirAll(filepath.Join(destDir, baseDir), os.ModePerm); err != nil { + return err + } + + fi, err := os.Lstat(path) + if err != nil { + return err + } + // directories must be listed explicitly and created separately. + // In the future we may want to expose this decision as hook point in + // CopyFileOptions, when clear use cases emerge. 
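A usage aside before the copy loop continues below: the hooks just documented allow, for example, a clone tree with no symlinks at all (a hook that always returns false, as the comment above suggests). A minimal sketch (Linux only; the `-flat` destination suffix is hypothetical):

```go
package main

import (
	"fmt"
	"os"

	"github.com/jaypipes/ghw/pkg/snapshot"
)

func main() {
	scratch, err := os.MkdirTemp("", "ghw-clone-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(scratch)

	// Clone the host pseudofiles, then validate the clone.
	if err := snapshot.CloneTreeInto(scratch); err != nil {
		panic(err)
	}
	missing, err := snapshot.ValidateClonedTree(snapshot.ExpectedCloneContent(), scratch)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d expected specs missing from clone\n", len(missing))

	// Copy a spec while materializing symlinks as regular files
	// (IsSymlinkFn always false) and never creating empty directories.
	opts := &snapshot.CopyFileOptions{
		IsSymlinkFn:       func(string, os.FileInfo) bool { return false },
		ShouldCreateDirFn: func(string, os.FileInfo) bool { return false },
	}
	_ = snapshot.CopyFilesInto([]string{"/proc/cpuinfo"}, scratch+"-flat", opts)
}
```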
+ destPath := filepath.Join(destDir, path) + if fi.IsDir() { + if opts.ShouldCreateDirFn(path, fi) { + if err := os.MkdirAll(destPath, os.ModePerm); err != nil { + return err + } + } else { + trace("expanded glob path %q is a directory - skipped\n", path) + } + continue + } + if opts.IsSymlinkFn(path, fi) { + trace(" copying link: %q -> %q\n", path, destPath) + if err := copyLink(path, destPath); err != nil { + return err + } + } else { + trace(" copying file: %q -> %q\n", path, destPath) + if err := copyPseudoFile(path, destPath); err != nil && !errors.Is(err, os.ErrPermission) { + return err + } + } + } + return nil +} + +func isSymlink(path string, fi os.FileInfo) bool { + return fi.Mode()&os.ModeSymlink != 0 +} + +func isDriversDir(path string, fi os.FileInfo) bool { + return strings.Contains(path, "drivers") +} + +func copyLink(path, targetPath string) error { + target, err := os.Readlink(path) + if err != nil { + return err + } + trace(" symlink %q -> %q\n", target, targetPath) + if err := os.Symlink(target, targetPath); err != nil { + if errors.Is(err, os.ErrExist) { + return nil + } + return err + } + + return nil +} + +func copyPseudoFile(path, targetPath string) error { + buf, err := os.ReadFile(path) + if err != nil { + return err + } + trace("creating %s\n", targetPath) + f, err := os.Create(targetPath) + if err != nil { + return err + } + if _, err = f.Write(buf); err != nil { + return err + } + f.Close() + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go new file mode 100644 index 00000000..f692d413 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go @@ -0,0 +1,220 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +import ( + "errors" + "os" + "path/filepath" + "strings" +) + +func createBlockDevices(buildDir string) error { + // Grab all the block device pseudo-directories from /sys/block symlinks + // (excluding loopback devices) and inject them into our build filesystem + // with all but the circular symlink'd subsystem directories + devLinks, err := os.ReadDir("/sys/block") + if err != nil { + return err + } + for _, devLink := range devLinks { + dname := devLink.Name() + if strings.HasPrefix(dname, "loop") { + continue + } + devPath := filepath.Join("/sys/block", dname) + trace("processing block device %q\n", devPath) + + // from the sysfs layout, we know this is always a symlink + linkContentPath, err := os.Readlink(devPath) + if err != nil { + return err + } + trace("link target for block device %q is %q\n", devPath, linkContentPath) + + // Create a symlink in our build filesystem that is a directory + // pointing to the actual device bus path where the block device's + // information directory resides + linkPath := filepath.Join(buildDir, "sys/block", dname) + linkTargetPath := filepath.Join( + buildDir, + "sys/block", + strings.TrimPrefix(linkContentPath, string(os.PathSeparator)), + ) + trace("creating device directory %s\n", linkTargetPath) + if err = os.MkdirAll(linkTargetPath, os.ModePerm); err != nil { + return err + } + + trace("linking device directory %s to %s\n", linkPath, linkContentPath) + // Make sure the link target is a relative path! + // if we use absolute path, the link target will be an absolute path starting + // with buildDir, hence the snapshot will contain broken link. 
+ // Otherwise, the unpack directory will never have the same prefix of buildDir! + if err = os.Symlink(linkContentPath, linkPath); err != nil { + return err + } + // Now read the source block device directory and populate the + // newly-created target link in the build directory with the + // appropriate block device pseudofiles + srcDeviceDir := filepath.Join( + "/sys/block", + strings.TrimPrefix(linkContentPath, string(os.PathSeparator)), + ) + trace("creating device directory %q from %q\n", linkTargetPath, srcDeviceDir) + if err = createBlockDeviceDir(linkTargetPath, srcDeviceDir); err != nil { + return err + } + } + return nil +} + +func createBlockDeviceDir(buildDeviceDir string, srcDeviceDir string) error { + // Populate the supplied directory (in our build filesystem) with all the + // appropriate information pseudofile contents for the block device. + devName := filepath.Base(srcDeviceDir) + devFiles, err := os.ReadDir(srcDeviceDir) + if err != nil { + return err + } + for _, f := range devFiles { + fname := f.Name() + fp := filepath.Join(srcDeviceDir, fname) + fi, err := os.Lstat(fp) + if err != nil { + return err + } + if fi.Mode()&os.ModeSymlink != 0 { + // Ignore any symlinks in the deviceDir since they simply point to + // either self-referential links or information we aren't + // interested in like "subsystem" + continue + } else if fi.IsDir() { + if strings.HasPrefix(fname, devName) { + // We're interested in are the directories that begin with the + // block device name. These are directories with information + // about the partitions on the device + buildPartitionDir := filepath.Join( + buildDeviceDir, fname, + ) + srcPartitionDir := filepath.Join( + srcDeviceDir, fname, + ) + trace("creating partition directory %s\n", buildPartitionDir) + err = os.MkdirAll(buildPartitionDir, os.ModePerm) + if err != nil { + return err + } + err = createPartitionDir(buildPartitionDir, srcPartitionDir) + if err != nil { + return err + } + } + } else if fi.Mode().IsRegular() { + // Regular files in the block device directory are both regular and + // pseudofiles containing information such as the size (in sectors) + // and whether the device is read-only + buf, err := os.ReadFile(fp) + if err != nil { + if errors.Is(err, os.ErrPermission) { + // example: /sys/devices/virtual/block/zram0/compact is 0400 + trace("permission denied reading %q - skipped\n", fp) + continue + } + return err + } + targetPath := filepath.Join(buildDeviceDir, fname) + trace("creating %s\n", targetPath) + f, err := os.Create(targetPath) + if err != nil { + return err + } + if _, err = f.Write(buf); err != nil { + return err + } + f.Close() + } + } + // There is a special file $DEVICE_DIR/queue/rotational that, for some hard + // drives, contains a 1 or 0 indicating whether the device is a spinning + // disk or not + srcQueueDir := filepath.Join( + srcDeviceDir, + "queue", + ) + buildQueueDir := filepath.Join( + buildDeviceDir, + "queue", + ) + err = os.MkdirAll(buildQueueDir, os.ModePerm) + if err != nil { + return err + } + fp := filepath.Join(srcQueueDir, "rotational") + buf, err := os.ReadFile(fp) + if err != nil { + return err + } + targetPath := filepath.Join(buildQueueDir, "rotational") + trace("creating %s\n", targetPath) + f, err := os.Create(targetPath) + if err != nil { + return err + } + if _, err = f.Write(buf); err != nil { + return err + } + f.Close() + + return nil +} + +func createPartitionDir(buildPartitionDir string, srcPartitionDir string) error { + // Populate the supplied directory (in our build 
filesystem) with all the + // appropriate information pseudofile contents for the partition. + partFiles, err := os.ReadDir(srcPartitionDir) + if err != nil { + return err + } + for _, f := range partFiles { + fname := f.Name() + fp := filepath.Join(srcPartitionDir, fname) + fi, err := os.Lstat(fp) + if err != nil { + return err + } + if fi.Mode()&os.ModeSymlink != 0 { + // Ignore any symlinks in the partition directory since they simply + // point to information we aren't interested in like "subsystem" + continue + } else if fi.IsDir() { + // The subdirectories in the partition directory are not + // interesting for us. They have information about power events and + // traces + continue + } else if fi.Mode().IsRegular() { + // Regular files in the block device directory are both regular and + // pseudofiles containing information such as the size (in sectors) + // and whether the device is read-only + buf, err := os.ReadFile(fp) + if err != nil { + return err + } + targetPath := filepath.Join(buildPartitionDir, fname) + trace("creating %s\n", targetPath) + f, err := os.Create(targetPath) + if err != nil { + return err + } + if _, err = f.Write(buf); err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go new file mode 100644 index 00000000..a26d6b01 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go @@ -0,0 +1,33 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +import ( + "strings" +) + +// ExpectedCloneGPUContent returns a slice of strings pertaining to the GPU devices ghw +// cares about. We cannot use a static list because we want to grab only the first cardX data +// (see comment in pkg/gpu/gpu_linux.go) +// Additionally, we want to make sure to clone the backing device data. +func ExpectedCloneGPUContent() []string { + cardEntries := []string{ + "device", + } + + filterName := func(cardName string) bool { + if !strings.HasPrefix(cardName, "card") { + return false + } + if strings.ContainsRune(cardName, '-') { + return false + } + return true + } + + return cloneContentByClass("drm", cardEntries, filterName, filterNone) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go new file mode 100644 index 00000000..8191e80c --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go @@ -0,0 +1,109 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +import ( + "os" + "path/filepath" +) + +func setupScratchDir(scratchDir string) error { + var createPaths = []string{ + "sys/block", + } + + for _, path := range createPaths { + if err := os.MkdirAll(filepath.Join(scratchDir, path), os.ModePerm); err != nil { + return err + } + } + + return createBlockDevices(scratchDir) +} + +// ExpectedCloneStaticContent return a slice of glob patterns which represent the pseudofiles +// ghw cares about, and which are independent from host specific topology or configuration, +// thus are safely represented by a static slice - e.g. they don't need to be discovered at runtime. 
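Since these specs are plain `filepath.Glob` patterns, a short sketch can show how many of the static specs defined below actually match anything on the running host (Linux only):

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/jaypipes/ghw/pkg/snapshot"
)

func main() {
	for _, spec := range snapshot.ExpectedCloneStaticContent() {
		matches, err := filepath.Glob(spec)
		if err != nil {
			continue // malformed pattern; not expected for the static specs
		}
		fmt.Printf("%-60s %d match(es)\n", spec, len(matches))
	}
}
```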
+func ExpectedCloneStaticContent() []string { + return []string{ + "/proc/cpuinfo", + "/proc/meminfo", + "/proc/self/mounts", + "/sys/devices/system/cpu/cpu*/cache/index*/*", + "/sys/devices/system/cpu/cpu*/topology/*", + "/sys/devices/system/memory/block_size_bytes", + "/sys/devices/system/memory/memory*/online", + "/sys/devices/system/memory/memory*/state", + "/sys/devices/system/node/has_*", + "/sys/devices/system/node/online", + "/sys/devices/system/node/possible", + "/sys/devices/system/node/node*/cpu*", + "/sys/devices/system/node/node*/cpu*/online", + "/sys/devices/system/node/node*/distance", + "/sys/devices/system/node/node*/meminfo", + "/sys/devices/system/node/node*/memory*", + "/sys/devices/system/node/node*/hugepages/hugepages-*/*", + } +} + +type filterFunc func(string) bool + +// cloneContentByClass copies all the content related to a given device class +// (devClass), possibly filtering out devices whose name does NOT pass a +// filter (filterName). Each entry in `/sys/class/$CLASS` is actually a +// symbolic link. We can filter out entries depending on the link target. +// Each filter is a simple function which takes the entry name or the link +// target and must return true if the entry should be collected, false +// otherwise. Last, explicitly collect a list of attributes for each entry, +// given as list of glob patterns as `subEntries`. +// Return the final list of glob patterns to be collected. +func cloneContentByClass(devClass string, subEntries []string, filterName filterFunc, filterLink filterFunc) []string { + var fileSpecs []string + + // warning: don't use the context package here, this means not even the linuxpath package. + // TODO(fromani) remove the path duplication + sysClass := filepath.Join("sys", "class", devClass) + entries, err := os.ReadDir(sysClass) + if err != nil { + // we should not import context, hence we can't Warn() + return fileSpecs + } + for _, entry := range entries { + devName := entry.Name() + + if !filterName(devName) { + continue + } + + devPath := filepath.Join(sysClass, devName) + dest, err := os.Readlink(devPath) + if err != nil { + continue + } + + if !filterLink(dest) { + continue + } + + // so, first copy the symlink itself + fileSpecs = append(fileSpecs, devPath) + // now we have to clone the content of the actual entry + // related (and found into a subdir of) the backing hardware + // device + devData := filepath.Clean(filepath.Join(sysClass, dest)) + for _, subEntry := range subEntries { + fileSpecs = append(fileSpecs, filepath.Join(devData, subEntry)) + } + } + + return fileSpecs +} + +// filterNone allows all content, filtering out none of it +func filterNone(_ string) bool { + return true +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go new file mode 100644 index 00000000..27b27573 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go @@ -0,0 +1,28 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +import ( + "strings" +) + +// ExpectedCloneNetContent returns a slice of strings pertaning to the network interfaces ghw +// cares about. We cannot use a static list because we want to filter away the virtual devices, +// which ghw doesn't concern itself about. So we need to do some runtime discovery. 
+// Additionally, we want to make sure to clone the backing device data. +func ExpectedCloneNetContent() []string { + ifaceEntries := []string{ + "addr_assign_type", + // intentionally avoid to clone "address" to avoid to leak any host-idenfifiable data. + } + + filterLink := func(linkDest string) bool { + return !strings.Contains(linkDest, "devices/virtual/net") + } + + return cloneContentByClass("net", ifaceEntries, filterNone, filterLink) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go new file mode 100644 index 00000000..e7aa7d26 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go @@ -0,0 +1,150 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +import ( + "fmt" + "os" + "path/filepath" + + pciaddr "github.com/jaypipes/ghw/pkg/pci/address" +) + +const ( + // root directory: entry point to start scanning the PCI forest + // warning: don't use the context package here, this means not even the linuxpath package. + // TODO(fromani) remove the path duplication + sysBusPCIDir = "/sys/bus/pci/devices" +) + +// ExpectedClonePCIContent return a slice of glob patterns which represent the pseudofiles +// ghw cares about, pertaining to PCI devices only. +// Beware: the content is host-specific, because the PCI topology is host-dependent and unpredictable. +func ExpectedClonePCIContent() []string { + fileSpecs := []string{ + "/sys/bus/pci/drivers/*", + } + pciRoots := []string{ + sysBusPCIDir, + } + for { + if len(pciRoots) == 0 { + break + } + pciRoot := pciRoots[0] + pciRoots = pciRoots[1:] + specs, roots := scanPCIDeviceRoot(pciRoot) + pciRoots = append(pciRoots, roots...) + fileSpecs = append(fileSpecs, specs...) + } + return fileSpecs +} + +// scanPCIDeviceRoot reports a slice of glob patterns which represent the pseudofiles +// ghw cares about pertaining to all the PCI devices connected to the bus connected from the +// given root; usually (but not always) a CPU packages has 1+ PCI(e) roots, forming the first +// level; more PCI bridges are (usually) attached to this level, creating deep nested trees. +// hence we need to scan all possible roots, to make sure not to miss important devices. +// +// note about notifying errors. This function and its helper functions do use trace() everywhere +// to report recoverable errors, even though it would have been appropriate to use Warn(). +// This is unfortunate, and again a byproduct of the fact we cannot use context.Context to avoid +// circular dependencies. +// TODO(fromani): switch to Warn() as soon as we figure out how to break this circular dep. +func scanPCIDeviceRoot(root string) (fileSpecs []string, pciRoots []string) { + trace("scanning PCI device root %q\n", root) + + perDevEntries := []string{ + "class", + "device", + "driver", + "irq", + "local_cpulist", + "modalias", + "numa_node", + "revision", + "vendor", + } + entries, err := os.ReadDir(root) + if err != nil { + return []string{}, []string{} + } + for _, entry := range entries { + entryName := entry.Name() + if addr := pciaddr.FromString(entryName); addr == nil { + // doesn't look like a entry we care about + // This is by far and large the most likely path + // hence we should NOT trace/warn here. 
+ continue + } + + entryPath := filepath.Join(root, entryName) + pciEntry, err := findPCIEntryFromPath(root, entryName) + if err != nil { + trace("error scanning %q: %v", entryName, err) + continue + } + + trace("PCI entry is %q\n", pciEntry) + fileSpecs = append(fileSpecs, entryPath) + for _, perNetEntry := range perDevEntries { + fileSpecs = append(fileSpecs, filepath.Join(pciEntry, perNetEntry)) + } + + if isPCIBridge(entryPath) { + trace("adding new PCI root %q\n", entryName) + pciRoots = append(pciRoots, pciEntry) + } + } + return fileSpecs, pciRoots +} + +func findPCIEntryFromPath(root, entryName string) (string, error) { + entryPath := filepath.Join(root, entryName) + fi, err := os.Lstat(entryPath) + if err != nil { + return "", fmt.Errorf("stat(%s) failed: %v\n", entryPath, err) + } + if fi.Mode()&os.ModeSymlink == 0 { + // regular file, nothing to resolve + return entryPath, nil + } + // resolve symlink + target, err := os.Readlink(entryPath) + trace("entry %q is symlink resolved to %q\n", entryPath, target) + if err != nil { + return "", fmt.Errorf("readlink(%s) failed: %v - skipped\n", entryPath, err) + } + return filepath.Clean(filepath.Join(root, target)), nil +} + +func isPCIBridge(entryPath string) bool { + subNodes, err := os.ReadDir(entryPath) + if err != nil { + // this is so unlikely we don't even return error. But we trace just in case. + trace("error scanning device entry path %q: %v", entryPath, err) + return false + } + for _, subNode := range subNodes { + if !subNode.IsDir() { + continue + } + if addr := pciaddr.FromString(subNode.Name()); addr != nil { + // we got an entry in the directory pertaining to this device + // which is a directory itself and it is named like a PCI address. + // Hence we infer the device we are considering is a PCI bridge of sorts. + // This is is indeed a bit brutal, but the only possible alternative + // (besides blindly copying everything in /sys/bus/pci/devices) is + // to detect the type of the device and pick only the bridges. + // This approach duplicates the logic within the `pci` subkpg + // - or forces us into awkward dep cycles, and has poorer forward + // compatibility. + return true + } + } + return false +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go new file mode 100644 index 00000000..af85a55b --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go @@ -0,0 +1,30 @@ +//go:build !linux +// +build !linux + +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package snapshot + +func setupScratchDir(scratchDir string) error { + return nil +} + +func ExpectedCloneStaticContent() []string { + return []string{} +} + +func ExpectedCloneGPUContent() []string { + return []string{} +} + +func ExpectedCloneNetContent() []string { + return []string{} +} + +func ExpectedClonePCIContent() []string { + return []string{} +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go new file mode 100644 index 00000000..73820c94 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go @@ -0,0 +1,111 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+//
+
+package snapshot
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// PackFrom creates the snapshot named `snapshotName` from the
+// directory tree whose root is `sourceRoot`.
+func PackFrom(snapshotName, sourceRoot string) error {
+	f, err := OpenDestination(snapshotName)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	return PackWithWriter(f, sourceRoot)
+}
+
+// OpenDestination opens the `snapshotName` file for writing, bailing out
+// if the file already exists and has content.
+// This is done to avoid accidental overwrites.
+func OpenDestination(snapshotName string) (*os.File, error) {
+	f, err := os.OpenFile(snapshotName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	if err != nil {
+		if !errors.Is(err, os.ErrExist) {
+			return nil, err
+		}
+		fs, err := os.Stat(snapshotName)
+		if err != nil {
+			return nil, err
+		}
+		if fs.Size() > 0 {
+			return nil, fmt.Errorf("file %s already exists and is of size > 0", snapshotName)
+		}
+		f, err = os.OpenFile(snapshotName, os.O_WRONLY, 0600)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return f, nil
+}
+
+// PackWithWriter creates a snapshot sending all the binary data to the
+// given `fw` writer. The snapshot is made from the directory tree whose
+// root is `sourceRoot`.
+func PackWithWriter(fw io.Writer, sourceRoot string) error {
+	gzw := gzip.NewWriter(fw)
+	defer gzw.Close()
+
+	tw := tar.NewWriter(gzw)
+	defer tw.Close()
+
+	return createSnapshot(tw, sourceRoot)
+}
+
+func createSnapshot(tw *tar.Writer, buildDir string) error {
+	return filepath.Walk(buildDir, func(path string, fi os.FileInfo, _ error) error {
+		if path == buildDir {
+			return nil
+		}
+		var link string
+		var err error
+
+		if fi.Mode()&os.ModeSymlink != 0 {
+			trace("processing symlink %s\n", path)
+			link, err = os.Readlink(path)
+			if err != nil {
+				return err
+			}
+		}
+
+		hdr, err := tar.FileInfoHeader(fi, link)
+		if err != nil {
+			return err
+		}
+		relPath, err := filepath.Rel(buildDir, path)
+		if err != nil {
+			return err
+		}
+		hdr.Name = relPath
+
+		if err = tw.WriteHeader(hdr); err != nil {
+			return err
+		}
+
+		switch hdr.Typeflag {
+		case tar.TypeReg, tar.TypeRegA:
+			f, err := os.Open(path)
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+			if _, err = io.Copy(tw, f); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz
new file mode 100644
index 00000000..edb26fbd
Binary files /dev/null and b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz differ
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
new file mode 100644
index 00000000..78c76121
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
@@ -0,0 +1,17 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
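Tying the pack helpers above to the unpack side that follows in unpack.go: a round-trip sketch (Linux only; paths are hypothetical, and the archive must not already have content, per OpenDestination):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/snapshot"
)

func main() {
	scratch := "/tmp/ghw-clone"    // hypothetical; created by CloneTreeInto
	archive := "/tmp/ghw-snap.tgz" // hypothetical; must not already have content

	// Clone the host pseudofiles and pack them into a tar.gz snapshot.
	if err := snapshot.CloneTreeInto(scratch); err != nil {
		panic(err)
	}
	if err := snapshot.PackFrom(archive, scratch); err != nil {
		panic(err)
	}

	// Unpack into a ghw-managed temporary directory, then clean up.
	root, err := snapshot.Unpack(archive)
	if err != nil {
		panic(err)
	}
	fmt.Println("snapshot unpacked under", root)
	// Cleanup is a no-op if GHW_SNAPSHOT_PRESERVE is set.
	_ = snapshot.Cleanup(root)
}
```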
+//
+
+package snapshot
+
+var trace func(msg string, args ...interface{})
+
+func init() {
+	trace = func(msg string, args ...interface{}) {}
+}
+
+func SetTraceFunction(fn func(msg string, args ...interface{})) {
+	trace = fn
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
new file mode 100644
index 00000000..f05f8f79
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
@@ -0,0 +1,128 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/jaypipes/ghw/pkg/option"
+)
+
+const (
+	TargetRoot = "ghw-snapshot-*"
+)
+
+const (
+	// If set, `ghw` will not unpack the snapshot in the user-supplied directory
+	// unless the aforementioned directory is empty.
+	OwnTargetDirectory = 1 << iota
+)
+
+// Cleanup removes the unpacked snapshot from the target root.
+// Please note that if the environment variable `GHW_SNAPSHOT_PRESERVE` is set,
+// this function will silently skip the removal.
+func Cleanup(targetRoot string) error {
+	if option.EnvOrDefaultSnapshotPreserve() {
+		return nil
+	}
+	return os.RemoveAll(targetRoot)
+}
+
+// Unpack expands the given snapshot in a temporary directory managed by `ghw`. Returns the path of that directory.
+func Unpack(snapshotName string) (string, error) {
+	targetRoot, err := os.MkdirTemp("", TargetRoot)
+	if err != nil {
+		return "", err
+	}
+	_, err = UnpackInto(snapshotName, targetRoot, 0)
+	return targetRoot, err
+}
+
+// UnpackInto expands the given snapshot in a client-supplied directory.
+// Returns true if the snapshot was actually unpacked, false otherwise.
+func UnpackInto(snapshotName, targetRoot string, flags uint) (bool, error) {
+	if (flags&OwnTargetDirectory) == OwnTargetDirectory && !isEmptyDir(targetRoot) {
+		return false, nil
+	}
+	snap, err := os.Open(snapshotName)
+	if err != nil {
+		return false, err
+	}
+	defer snap.Close()
+	return true, Untar(targetRoot, snap)
+}
+
+// Untar extracts data from the given reader (providing data in tar.gz format) and unpacks it in the given directory.
+func Untar(root string, r io.Reader) error {
+	var err error
+	gzr, err := gzip.NewReader(r)
+	if err != nil {
+		return err
+	}
+	defer gzr.Close()
+
+	tr := tar.NewReader(gzr)
+	for {
+		header, err := tr.Next()
+		if err == io.EOF {
+			// we are done
+			return nil
+		}
+
+		if err != nil {
+			// bail out
			return err
+		}
+
+		if header == nil {
+			// TODO: how come?
+ continue + } + + target := filepath.Join(root, header.Name) + mode := os.FileMode(header.Mode) + + switch header.Typeflag { + case tar.TypeDir: + err = os.MkdirAll(target, mode) + if err != nil { + return err + } + + case tar.TypeReg: + dst, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, mode) + if err != nil { + return err + } + + _, err = io.Copy(dst, tr) + if err != nil { + return err + } + + dst.Close() + + case tar.TypeSymlink: + err = os.Symlink(header.Linkname, target) + if err != nil { + return err + } + } + } +} + +func isEmptyDir(name string) bool { + entries, err := os.ReadDir(name) + if err != nil { + return false + } + return len(entries) == 0 +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go new file mode 100644 index 00000000..0210ab48 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go @@ -0,0 +1,165 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package topology + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/marshal" + "github.com/jaypipes/ghw/pkg/memory" + "github.com/jaypipes/ghw/pkg/option" +) + +// Architecture describes the overall hardware architecture. It can be either +// Symmetric Multi-Processor (SMP) or Non-Uniform Memory Access (NUMA) +type Architecture int + +const ( + // SMP is a Symmetric Multi-Processor system + ArchitectureSMP Architecture = iota + // NUMA is a Non-Uniform Memory Access system + ArchitectureNUMA +) + +const ( + // DEPRECATED: please use ArchitectureSMP. + // TODO(jaypipes): Remove before v1.0 + ARCHITECTURE_SMP = ArchitectureSMP + // DEPRECATED: please use ArchitectureNUMA. + // TODO(jaypipes): Remove before v1.0 + ARCHITECTURE_NUMA = ArchitectureNUMA +) + +var ( + architectureString = map[Architecture]string{ + ArchitectureSMP: "SMP", + ArchitectureNUMA: "NUMA", + } + + // NOTE(fromani): the keys are all lowercase and do not match + // the keys in the opposite table `architectureString`. + // This is done because of the choice we made in + // Architecture:MarshalJSON. + // We use this table only in UnmarshalJSON, so it should be OK. + stringArchitecture = map[string]Architecture{ + "smp": ArchitectureSMP, + "numa": ArchitectureNUMA, + } +) + +func (a Architecture) String() string { + return architectureString[a] +} + +// NOTE(jaypipes): since serialized output is as "official" as we're going to +// get, let's lowercase the string output when serializing, in order to +// "normalize" the expected serialized output +func (a Architecture) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(strings.ToLower(a.String()))), nil +} + +func (a *Architecture) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + key := strings.ToLower(s) + val, ok := stringArchitecture[key] + if !ok { + return fmt.Errorf("unknown architecture: %q", key) + } + *a = val + return nil +} + +// Node is an abstract construct representing a collection of processors and +// various levels of memory cache that those processors share. In a NUMA +// architecture, there are multiple NUMA nodes, abstracted here as multiple +// Node structs. 
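Before the Node documentation continues below, a quick sketch of the Architecture (de)serialization contract defined above: marshaling emits the lowercase form, and unmarshaling lowercases its input before the table lookup, so either case round-trips.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jaypipes/ghw/pkg/topology"
)

func main() {
	// MarshalJSON lowercases the canonical string form.
	b, _ := json.Marshal(topology.ArchitectureNUMA)
	fmt.Println(string(b)) // "numa"

	// UnmarshalJSON lowercases its input, so "SMP" works as well as "smp".
	var a topology.Architecture
	if err := json.Unmarshal([]byte(`"SMP"`), &a); err == nil {
		fmt.Println(a) // SMP
	}
}
```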
In an SMP architecture, a single Node will be available in the +// Info struct and this single struct can be used to describe the levels of +// memory caching available to the single physical processor package's physical +// processor cores +type Node struct { + ID int `json:"id"` + Cores []*cpu.ProcessorCore `json:"cores"` + Caches []*memory.Cache `json:"caches"` + Distances []int `json:"distances"` + Memory *memory.Area `json:"memory"` +} + +func (n *Node) String() string { + return fmt.Sprintf( + "node #%d (%d cores)", + n.ID, + len(n.Cores), + ) +} + +// Info describes the system topology for the host hardware +type Info struct { + ctx *context.Context + Architecture Architecture `json:"architecture"` + Nodes []*Node `json:"nodes"` +} + +// New returns a pointer to an Info struct that contains information about the +// NUMA topology on the host system +func New(opts ...*option.Option) (*Info, error) { + merged := option.Merge(opts...) + ctx := context.New(merged) + info := &Info{ctx: ctx} + var err error + if context.Exists(merged) { + err = info.load() + } else { + err = ctx.Do(info.load) + } + if err != nil { + return nil, err + } + for _, node := range info.Nodes { + sort.Sort(memory.SortByCacheLevelTypeFirstProcessor(node.Caches)) + } + return info, nil +} + +func (i *Info) String() string { + archStr := "SMP" + if i.Architecture == ArchitectureNUMA { + archStr = "NUMA" + } + res := fmt.Sprintf( + "topology %s (%d nodes)", + archStr, + len(i.Nodes), + ) + return res +} + +// simple private struct used to encapsulate topology information in a +// top-level "topology" YAML/JSON map/object key +type topologyPrinter struct { + Info *Info `json:"topology"` +} + +// YAMLString returns a string with the topology information formatted as YAML +// under a top-level "topology:" key +func (i *Info) YAMLString() string { + return marshal.SafeYAML(i.ctx, topologyPrinter{i}) +} + +// JSONString returns a string with the topology information formatted as JSON +// under a top-level "topology:" key +func (i *Info) JSONString(indent bool) string { + return marshal.SafeJSON(i.ctx, topologyPrinter{i}, indent) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go new file mode 100644 index 00000000..dbd0811e --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go @@ -0,0 +1,107 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
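For completeness, a sketch of consuming the topology package defined above (Linux, per the implementation that follows):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/topology"
)

func main() {
	info, err := topology.New()
	if err != nil {
		panic(err)
	}
	fmt.Println(info) // e.g. "topology NUMA (2 nodes)"
	for _, node := range info.Nodes {
		fmt.Printf("  %s, distances %v\n", node, node.Distances)
	}
}
```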
+// + +package topology + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/jaypipes/ghw/pkg/context" + "github.com/jaypipes/ghw/pkg/cpu" + "github.com/jaypipes/ghw/pkg/linuxpath" + "github.com/jaypipes/ghw/pkg/memory" +) + +func (i *Info) load() error { + i.Nodes = topologyNodes(i.ctx) + if len(i.Nodes) == 1 { + i.Architecture = ArchitectureSMP + } else { + i.Architecture = ArchitectureNUMA + } + return nil +} + +func topologyNodes(ctx *context.Context) []*Node { + paths := linuxpath.New(ctx) + nodes := make([]*Node, 0) + + files, err := os.ReadDir(paths.SysDevicesSystemNode) + if err != nil { + ctx.Warn("failed to determine nodes: %s\n", err) + return nodes + } + for _, file := range files { + filename := file.Name() + if !strings.HasPrefix(filename, "node") { + continue + } + node := &Node{} + nodeID, err := strconv.Atoi(filename[4:]) + if err != nil { + ctx.Warn("failed to determine node ID: %s\n", err) + return nodes + } + node.ID = nodeID + cores, err := cpu.CoresForNode(ctx, nodeID) + if err != nil { + ctx.Warn("failed to determine cores for node: %s\n", err) + return nodes + } + node.Cores = cores + caches, err := memory.CachesForNode(ctx, nodeID) + if err != nil { + ctx.Warn("failed to determine caches for node: %s\n", err) + return nodes + } + node.Caches = caches + + distances, err := distancesForNode(ctx, nodeID) + if err != nil { + ctx.Warn("failed to determine node distances for node: %s\n", err) + return nodes + } + node.Distances = distances + + area, err := memory.AreaForNode(ctx, nodeID) + if err != nil { + ctx.Warn("failed to determine memory area for node: %s\n", err) + return nodes + } + node.Memory = area + + nodes = append(nodes, node) + } + return nodes +} + +func distancesForNode(ctx *context.Context, nodeID int) ([]int, error) { + paths := linuxpath.New(ctx) + path := filepath.Join( + paths.SysDevicesSystemNode, + fmt.Sprintf("node%d", nodeID), + "distance", + ) + + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + items := strings.Fields(strings.TrimSpace(string(data))) + dists := make([]int, len(items)) // TODO: can a NUMA cell be offlined? + for idx, item := range items { + dist, err := strconv.Atoi(item) + if err != nil { + return dists, err + } + dists[idx] = dist + } + return dists, nil +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go new file mode 100644 index 00000000..b5ee4354 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go @@ -0,0 +1,19 @@ +//go:build !linux && !windows +// +build !linux,!windows + +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. +// + +package topology + +import ( + "runtime" + + "github.com/pkg/errors" +) + +func (i *Info) load() error { + return errors.New("topologyFillInfo not implemented on " + runtime.GOOS) +} diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go new file mode 100644 index 00000000..2991aaa9 --- /dev/null +++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go @@ -0,0 +1,156 @@ +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+//
+
+package topology
+
+import (
+	"encoding/binary"
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	rcFailure                  = 0
+	sizeofLogicalProcessorInfo = 32
+
+	errInsufficientBuffer syscall.Errno = 122
+
+	relationProcessorCore    = 0
+	relationNUMANode         = 1
+	relationCache            = 2
+	relationProcessorPackage = 3
+	relationGroup            = 4
+)
+
+func (i *Info) load() error {
+	nodes, err := topologyNodes()
+	if err != nil {
+		return err
+	}
+	i.Nodes = nodes
+	if len(nodes) == 1 {
+		i.Architecture = ArchitectureSMP
+	} else {
+		i.Architecture = ArchitectureNUMA
+	}
+	return nil
+}
+
+func topologyNodes() ([]*Node, error) {
+	nodes := make([]*Node, 0)
+	lpis, err := getWin32LogicalProcessorInfos()
+	if err != nil {
+		return nil, err
+	}
+	for _, lpi := range lpis {
+		switch lpi.relationship {
+		case relationNUMANode:
+			nodes = append(nodes, &Node{
+				ID: lpi.numaNodeID(),
+			})
+		case relationProcessorCore:
+			// TODO(jaypipes): associate LP to processor core
+		case relationProcessorPackage:
+			// ignore
+		case relationCache:
+			// TODO(jaypipes) handle cache layers
+		default:
+			return nil, fmt.Errorf("Unknown LOGICAL_PROCESSOR_RELATIONSHIP value: %d", lpi.relationship)
+		}
+	}
+	return nodes, nil
+}
+
+// This is the CACHE_DESCRIPTOR struct in the Win32 API
+type cacheDescriptor struct {
+	level         uint8
+	associativity uint8
+	lineSize      uint16
+	size          uint32
+	cacheType     uint32
+}
+
+// This is the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct in the Win32 API
+type logicalProcessorInfo struct {
+	processorMask uint64
+	relationship  uint64
+	// The following dummyunion member is a representation of this part of
+	// the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct:
+	//
+	//     union {
+	//         struct {
+	//             BYTE Flags;
+	//         } ProcessorCore;
+	//         struct {
+	//             DWORD NodeNumber;
+	//         } NumaNode;
+	//         CACHE_DESCRIPTOR Cache;
+	//         ULONGLONG Reserved[2];
+	//     } DUMMYUNIONNAME;
+	dummyunion [16]byte
+}
+
+// numaNodeID returns the NUMA node's identifier from the logical processor
+// information struct by grabbing the integer representation of the struct's
+// NumaNode unioned data element
+func (lpi *logicalProcessorInfo) numaNodeID() int {
+	if lpi.relationship != relationNUMANode {
+		return -1
+	}
+	return int(binary.LittleEndian.Uint16(lpi.dummyunion[0:]))
+}
+
+// ref: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+func getWin32LogicalProcessorInfos() (
+	[]*logicalProcessorInfo,
+	error,
+) {
+	lpis := make([]*logicalProcessorInfo, 0)
+	win32api := syscall.NewLazyDLL("kernel32.dll")
+	glpi := win32api.NewProc("GetLogicalProcessorInformation")
+
+	// The way the GetLogicalProcessorInformation (GLPI) Win32 API call
+	// works is wonky, but consistent with the Win32 API calling structure.
+	// Basically, you need to first call the GLPI API with a NULL pointer
+	// and a pointer to an integer. That first call to the API should
+	// return ERROR_INSUFFICIENT_BUFFER, which is the indication that the
+	// supplied buffer pointer is NULL and needs to have memory allocated to
+	// it of an amount equal to the value of the integer pointer argument.
+	// Once the buffer is allocated this amount of space, the GLPI API call
+	// is again called. This time, the return value should be 0 and the
+	// buffer will have been set to an array of
+	// SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs.
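+	//
+	// A rough sketch of the call sequence performed below (error handling
+	// elided; the NULL/buffer names refer to the Win32 call, not Go):
+	//
+	//	size := uint32(0)
+	//	GetLogicalProcessorInformation(NULL, &size)    // fails, sets size to required bytes
+	//	buf := make([]byte, size)
+	//	GetLogicalProcessorInformation(&buf[0], &size) // succeeds, fills buf with structs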
+	toAllocate := uint32(0)
+	// first, figure out how much we need
+	rc, _, win32err := glpi.Call(uintptr(0), uintptr(unsafe.Pointer(&toAllocate)))
+	if rc == rcFailure {
+		if win32err != errInsufficientBuffer {
+			return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call failed to return ERROR_INSUFFICIENT_BUFFER")
+		}
+	} else {
+		// This shouldn't happen because the buffer hasn't yet been allocated...
+		return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call returned success instead of failure with ERROR_INSUFFICIENT_BUFFER")
+	}
+
+	// OK, now we actually allocate a raw buffer to fill with some number
+	// of SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs
+	b := make([]byte, toAllocate)
+	rc, _, win32err = glpi.Call(uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&toAllocate)))
+	if rc == rcFailure {
+		return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API call failed to set supplied buffer. Win32 system error: %s", win32err)
+	}
+
+	for x := uint32(0); x < toAllocate; x += sizeofLogicalProcessorInfo {
+		lpiraw := b[x : x+sizeofLogicalProcessorInfo]
+		lpi := &logicalProcessorInfo{
+			processorMask: binary.LittleEndian.Uint64(lpiraw[0:]),
+			relationship:  binary.LittleEndian.Uint64(lpiraw[8:]),
+		}
+		copy(lpi.dummyunion[0:16], lpiraw[16:32])
+		lpis = append(lpis, lpi)
+	}
+	return lpis, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
new file mode 100644
index 00000000..13fa7b5b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
@@ -0,0 +1,37 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package unitutil
+
+var (
+	KB int64 = 1024
+	MB       = KB * 1024
+	GB       = MB * 1024
+	TB       = GB * 1024
+	PB       = TB * 1024
+	EB       = PB * 1024
+)
+
+// AmountString returns the divisor and unit suffix (KB, MB, GB, ...) for the
+// binary-multiple magnitude nearest to the supplied size.
+//
+// For example, AmountString(1022) == (1024, "KB") and
+// AmountString(5*GB) == (1073741824, "GB").
+func AmountString(size int64) (int64, string) {
+	switch {
+	case size < MB:
+		return KB, "KB"
+	case size < GB:
+		return MB, "MB"
+	case size < TB:
+		return GB, "GB"
+	case size < PB:
+		return TB, "TB"
+	case size < EB:
+		return PB, "PB"
+	default:
+		return EB, "EB"
+	}
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/util/util.go b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
new file mode 100644
index 00000000..816aeb1b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
@@ -0,0 +1,84 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package util
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/jaypipes/ghw/pkg/context"
+)
+
+const (
+	UNKNOWN = "unknown"
+)
+
+type closer interface {
+	Close() error
+}
+
+func SafeClose(c closer) {
+	err := c.Close()
+	if err != nil {
+		_, _ = fmt.Fprintf(os.Stderr, "failed to close: %s", err)
+	}
+}
+
+// SafeIntFromFile reads the supplied filepath and converts the contents to an
+// integer. It returns -1 if there were file permission or existence errors, or
+// if the contents could not be successfully converted to an integer. On any
+// error, a warning message is emitted and -1 is returned.
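+//
+// A hypothetical usage sketch (the sysfs path is illustrative only):
+//
+//	coreID := SafeIntFromFile(ctx, "/sys/devices/system/cpu/cpu0/topology/core_id")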
+func SafeIntFromFile(ctx *context.Context, path string) int {
+	msg := "failed to read int from file: %s\n"
+	buf, err := os.ReadFile(path)
+	if err != nil {
+		ctx.Warn(msg, err)
+		return -1
+	}
+	contents := strings.TrimSpace(string(buf))
+	res, err := strconv.Atoi(contents)
+	if err != nil {
+		ctx.Warn(msg, err)
+		return -1
+	}
+	return res
+}
+
+// ConcatStrings concatenates strings into a larger one. This function
+// addresses a very specific ghw use case. For a more general approach,
+// just use strings.Join()
+func ConcatStrings(items ...string) string {
+	return strings.Join(items, "")
+}
+
+// ParseBool converts a string to bool using strconv.ParseBool() when
+// recognized, otherwise it uses a map lookup to convert strings like "Yes",
+// "No", "On" and "Off" to bool. `ethtool` uses on, off, yes, no (upper and
+// lower case) rather than true and false.
+func ParseBool(str string) (bool, error) {
+	if b, err := strconv.ParseBool(str); err == nil {
+		return b, err
+	} else {
+		ExtraBools := map[string]bool{
+			"on":  true,
+			"off": false,
+			"yes": true,
+			"no":  false,
+			// Return false instead of an error on empty strings
+			// For example from empty files in SysClassNet/Device
+			"": false,
+		}
+		if b, ok := ExtraBools[strings.ToLower(str)]; ok {
+			return b, nil
+		} else {
+			// Return strconv.ParseBool's error here
+			return b, err
+		}
+	}
+}
diff --git a/vendor/github.com/jaypipes/pcidb/.gitignore b/vendor/github.com/jaypipes/pcidb/.gitignore
new file mode 100644
index 00000000..cc292d34
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+coverage*.*
diff --git a/vendor/github.com/jaypipes/pcidb/COPYING b/vendor/github.com/jaypipes/pcidb/COPYING
new file mode 100644
index 00000000..68c771a0
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/COPYING
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/vendor/github.com/jaypipes/pcidb/LICENSE b/vendor/github.com/jaypipes/pcidb/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/jaypipes/pcidb/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/jaypipes/pcidb/Makefile b/vendor/github.com/jaypipes/pcidb/Makefile new file mode 100644 index 00000000..73a274c7 --- /dev/null +++ b/vendor/github.com/jaypipes/pcidb/Makefile @@ -0,0 +1,38 @@ +VENDOR := vendor +PKGS := $(shell go list ./... | grep -v /$(VENDOR)/) +SRC = $(shell find . -type f -name '*.go' -not -path "*/$(VENDOR)/*") +BIN_DIR := $(GOPATH)/bin +GOMETALINTER := $(BIN_DIR)/gometalinter + +.PHONY: test +test: vet + go test $(PKGS) + +$(GOMETALINTER): + go get -u github.com/alecthomas/gometalinter + $(GOMETALINTER) --install &> /dev/null + +.PHONY: lint +lint: $(GOMETALINTER) + $(GOMETALINTER) ./... 
--vendor
+
+.PHONY: fmt
+fmt:
+	@gofmt -s -l -w $(SRC)
+
+.PHONY: fmtcheck
+fmtcheck:
+	@bash -c "diff -u <(echo -n) <(gofmt -d $(SRC))"
+
+.PHONY: vet
+vet:
+	go vet $(PKGS)
+
+.PHONY: cover
+cover:
+	$(shell [ -e coverage.out ] && rm coverage.out)
+	@echo "mode: count" > coverage-all.out
+	@$(foreach pkg,$(PKGS),\
+		go test -coverprofile=coverage.out -covermode=count $(pkg);\
+		tail -n +2 coverage.out >> coverage-all.out;)
+	go tool cover -html=coverage-all.out -o=coverage-all.html
diff --git a/vendor/github.com/jaypipes/pcidb/README.md b/vendor/github.com/jaypipes/pcidb/README.md
new file mode 100644
index 00000000..27485dca
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/README.md
@@ -0,0 +1,417 @@
+# `pcidb` - the Golang PCI DB library
+
+[![Build Status](https://github.com/jaypipes/pcidb/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/jaypipes/pcidb/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/pcidb)](https://goreportcard.com/report/github.com/jaypipes/pcidb)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
+
+`pcidb` is a small Golang library for programmatic querying of PCI vendor,
+product and class information.
+
+We currently test `pcidb` on Linux, Windows and macOS.
+
+## Usage
+
+`pcidb` contains a PCI database inspection and querying facility that allows
+developers to query for information about hardware device classes, vendors
+and products.
+
+The `pcidb.New()` function returns a pointer to a `pcidb.PCIDB` struct, or an
+error if the PCI database could not be loaded.
+
+> `pcidb`'s default behaviour is to first search for pci-ids DB files on the
+> local host system in well-known filesystem paths. If `pcidb` cannot find a
+> pci-ids DB file on the local host system, you can configure `pcidb` to fetch
+> a current pci-ids DB file from the network. You can enable this
+> network-fetching behaviour with the `pcidb.WithEnableNetworkFetch()` function
+> or set the `PCIDB_ENABLE_NETWORK_FETCH` environment variable to a non-0
+> value.
+
+The `pcidb.PCIDB` struct contains a number of fields that may be queried for
+PCI information:
+
+* `pcidb.PCIDB.Classes` is a map, keyed by the PCI class ID (a hex-encoded
+  string) of pointers to `pcidb.Class` structs, one for each class of PCI
+  device known to `pcidb`
+* `pcidb.PCIDB.Vendors` is a map, keyed by the PCI vendor ID (a hex-encoded
+  string) of pointers to `pcidb.Vendor` structs, one for each PCI vendor
+  known to `pcidb`
+* `pcidb.PCIDB.Products` is a map, keyed by the PCI product ID* (a hex-encoded
+  string) of pointers to `pcidb.Product` structs, one for each PCI product
+  known to `pcidb`
+
+**NOTE**: PCI products are often referred to by their "device ID". We use
+the term "product ID" in `pcidb` because it more accurately reflects what the
+identifier is for: a specific product line produced by the vendor.
+
+### Overriding the root mountpoint `pcidb` uses
+
+The default root mountpoint that `pcidb` uses when looking for information
+about the host system is `/`. So, for example, when looking up known pci-ids DB
+files on Linux, `pcidb` will attempt to discover a pciids DB file at
+`/usr/share/misc/pci.ids`. If you are calling `pcidb` from a system that has an
+alternate root mountpoint, you can either set the `PCIDB_CHROOT` environment
+variable to that alternate path, or call the `pcidb.New()` function with the
+`pcidb.WithChroot()` modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`PCIDB_CHROOT` to `/host` so that pcidb can find files like
+`/usr/share/misc/pci.ids` at `/host/usr/share/misc/pci.ids`.
+
+Alternatively, you can use the `pcidb.WithChroot()` function like so:
+
+```go
+pci, err := pcidb.New(pcidb.WithChroot("/host"))
+```
+
+### PCI device classes
+
+Let's take a look at the PCI device class information and how to query the PCI
+database for class, subclass, and programming interface information.
+
+Each `pcidb.Class` struct contains the following fields:
+
+* `pcidb.Class.ID` is the hex-encoded string identifier for the device
+  class
+* `pcidb.Class.Name` is the common name/description of the class
+* `pcidb.Class.Subclasses` is an array of pointers to
+  `pcidb.Subclass` structs, one for each subclass in the device class
+
+Each `pcidb.Subclass` struct contains the following fields:
+
+* `pcidb.Subclass.ID` is the hex-encoded string identifier for the device
+  subclass
+* `pcidb.Subclass.Name` is the common name/description of the subclass
+* `pcidb.Subclass.ProgrammingInterfaces` is an array of pointers to
+  `pcidb.ProgrammingInterface` structs, one for each programming interface
+  for the device subclass
+
+Each `pcidb.ProgrammingInterface` struct contains the following fields:
+
+* `pcidb.ProgrammingInterface.ID` is the hex-encoded string identifier for
+  the programming interface
+* `pcidb.ProgrammingInterface.Name` is the common name/description for the
+  programming interface
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/pcidb"
+)
+
+func main() {
+	pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+	for _, devClass := range pci.Classes {
+		fmt.Printf(" Device class: %v ('%v')\n", devClass.Name, devClass.ID)
+		for _, devSubclass := range devClass.Subclasses {
+			fmt.Printf("    Device subclass: %v ('%v')\n", devSubclass.Name, devSubclass.ID)
+			for _, progIface := range devSubclass.ProgrammingInterfaces {
+				fmt.Printf("        Programming interface: %v ('%v')\n", progIface.Name, progIface.ID)
+			}
+		}
+	}
+}
+```
+
+Example output from my personal workstation, snipped for brevity:
+
+```
+...
+ Device class: Serial bus controller ('0c')
+    Device subclass: FireWire (IEEE 1394) ('00')
+        Programming interface: Generic ('00')
+        Programming interface: OHCI ('10')
+    Device subclass: ACCESS Bus ('01')
+    Device subclass: SSA ('02')
+    Device subclass: USB controller ('03')
+        Programming interface: UHCI ('00')
+        Programming interface: OHCI ('10')
+        Programming interface: EHCI ('20')
+        Programming interface: XHCI ('30')
+        Programming interface: Unspecified ('80')
+        Programming interface: USB Device ('fe')
+    Device subclass: Fibre Channel ('04')
+    Device subclass: SMBus ('05')
+    Device subclass: InfiniBand ('06')
+    Device subclass: IPMI SMIC interface ('07')
+    Device subclass: SERCOS interface ('08')
+    Device subclass: CANBUS ('09')
+...
+```
+
+### PCI vendors and products
+
+Let's take a look at the PCI vendor information and how to query the PCI
+database for vendor information and the products a vendor supplies.
+
+Each `pcidb.Vendor` struct contains the following fields:
+
+* `pcidb.Vendor.ID` is the hex-encoded string identifier for the vendor
+* `pcidb.Vendor.Name` is the common name/description of the vendor
+* `pcidb.Vendor.Products` is an array of pointers to `pcidb.Product`
+  structs, one for each product supplied by the vendor
+
+Each `pcidb.Product` struct contains the following fields:
+
+* `pcidb.Product.VendorID` is the hex-encoded string identifier for the
+  product's vendor
+* `pcidb.Product.ID` is the hex-encoded string identifier for the product
+* `pcidb.Product.Name` is the common name/description of the product
+* `pcidb.Product.Subsystems` is an array of pointers to
+  `pcidb.Product` structs, one for each "subsystem" (sometimes called
+  "sub-device" in PCI literature) for the product
+
+**NOTE**: A subsystem product may have a different vendor than its "parent" PCI
+product. This is sometimes referred to as the "sub-vendor".
+
+Here's some example code that demonstrates listing the PCI vendors with the
+most known products:
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/jaypipes/pcidb"
+)
+
+type ByCountProducts []*pcidb.Vendor
+
+func (v ByCountProducts) Len() int {
+	return len(v)
+}
+
+func (v ByCountProducts) Swap(i, j int) {
+	v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountProducts) Less(i, j int) bool {
+	return len(v[i].Products) > len(v[j].Products)
+}
+
+func main() {
+	pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+	vendors := make([]*pcidb.Vendor, len(pci.Vendors))
+	x := 0
+	for _, vendor := range pci.Vendors {
+		vendors[x] = vendor
+		x++
+	}
+
+	sort.Sort(ByCountProducts(vendors))
+
+	fmt.Println("Top 5 vendors by product")
+	fmt.Println("====================================================")
+	for _, vendor := range vendors[0:5] {
+		fmt.Printf("%v ('%v') has %d products\n", vendor.Name, vendor.ID, len(vendor.Products))
+	}
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 5 vendors by product
+====================================================
+Intel Corporation ('8086') has 3389 products
+NVIDIA Corporation ('10de') has 1358 products
+Advanced Micro Devices, Inc. [AMD/ATI] ('1002') has 886 products
+National Instruments ('1093') has 601 products
+Chelsio Communications Inc ('1425') has 525 products
+```
+
+The following is an example of querying the PCI product and subsystem
+information to find the products which have the most number of subsystems that
+have a different vendor than the top-level product. In other words, the two
+products which have been re-sold or re-manufactured with the most number of
+different companies.
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/jaypipes/pcidb"
+)
+
+type ByCountSeparateSubvendors []*pcidb.Product
+
+func (v ByCountSeparateSubvendors) Len() int {
+	return len(v)
+}
+
+func (v ByCountSeparateSubvendors) Swap(i, j int) {
+	v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountSeparateSubvendors) Less(i, j int) bool {
+	iVendor := v[i].VendorID
+	iSetSubvendors := make(map[string]bool, 0)
+	iNumDiffSubvendors := 0
+	jVendor := v[j].VendorID
+	jSetSubvendors := make(map[string]bool, 0)
+	jNumDiffSubvendors := 0
+
+	for _, sub := range v[i].Subsystems {
+		if sub.VendorID != iVendor {
+			iSetSubvendors[sub.VendorID] = true
+		}
+	}
+	iNumDiffSubvendors = len(iSetSubvendors)
+
+	for _, sub := range v[j].Subsystems {
+		if sub.VendorID != jVendor {
+			jSetSubvendors[sub.VendorID] = true
+		}
+	}
+	jNumDiffSubvendors = len(jSetSubvendors)
+
+	return iNumDiffSubvendors > jNumDiffSubvendors
+}
+
+func main() {
+	pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+	products := make([]*pcidb.Product, len(pci.Products))
+	x := 0
+	for _, product := range pci.Products {
+		products[x] = product
+		x++
+	}
+
+	sort.Sort(ByCountSeparateSubvendors(products))
+
+	fmt.Println("Top 2 products by # different subvendors")
+	fmt.Println("====================================================")
+	for _, product := range products[0:2] {
+		vendorID := product.VendorID
+		vendor := pci.Vendors[vendorID]
+		setSubvendors := make(map[string]bool, 0)
+
+		for _, sub := range product.Subsystems {
+			if sub.VendorID != vendorID {
+				setSubvendors[sub.VendorID] = true
+			}
+		}
+		fmt.Printf("%v ('%v') from %v\n", product.Name, product.ID, vendor.Name)
+		fmt.Printf(" -> %d subsystems under the following different vendors:\n", len(setSubvendors))
+		for subvendorID := range setSubvendors {
+			subvendor, exists := pci.Vendors[subvendorID]
+			subvendorName := "Unknown subvendor"
+			if exists {
+				subvendorName = subvendor.Name
+			}
+			fmt.Printf("      - %v ('%v')\n", subvendorName, subvendorID)
+		}
+	}
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 2 products by # different subvendors
+====================================================
+RTL-8100/8101L/8139 PCI Fast Ethernet Adapter ('8139') from Realtek Semiconductor Co., Ltd.
+ -> 34 subsystems under the following different vendors:
+      - OVISLINK Corp. ('149c')
+      - EPoX Computer Co., Ltd. ('1695')
+      - Red Hat, Inc ('1af4')
+      - Mitac ('1071')
+      - Netgear ('1385')
+      - Micro-Star International Co., Ltd. [MSI] ('1462')
+      - Hangzhou Silan Microelectronics Co., Ltd. ('1904')
+      - Compex ('11f6')
+      - Edimax Computer Co. ('1432')
+      - KYE Systems Corporation ('1489')
+      - ZyXEL Communications Corporation ('187e')
+      - Acer Incorporated [ALI] ('1025')
+      - Matsushita Electric Industrial Co., Ltd. ('10f7')
+      - Ruby Tech Corp. ('146c')
+      - Belkin ('1799')
+      - Allied Telesis ('1259')
+      - Unex Technology Corp. ('1429')
+      - CIS Technology Inc ('1436')
+      - D-Link System Inc ('1186')
+      - Ambicom Inc ('1395')
+      - AOPEN Inc. ('a0a0')
+      - TTTech Computertechnik AG (Wrong ID) ('0357')
+      - Gigabyte Technology Co., Ltd ('1458')
+      - Packard Bell B.V. ('1631')
+      - Billionton Systems Inc ('14cb')
+      - Kingston Technologies ('2646')
+      - Accton Technology Corporation ('1113')
+      - Samsung Electronics Co Ltd ('144d')
+      - Biostar Microtech Int'l Corp ('1565')
+      - U.S. Robotics ('16ec')
+      - KTI ('8e2e')
+      - Hewlett-Packard Company ('103c')
+      - ASUSTeK Computer Inc.
('1043') + - Surecom Technology ('10bd') +Bt878 Video Capture ('036e') from Brooktree Corporation + -> 30 subsystems under the following different vendors: + - iTuner ('aa00') + - Nebula Electronics Ltd. ('0071') + - DViCO Corporation ('18ac') + - iTuner ('aa05') + - iTuner ('aa0d') + - LeadTek Research Inc. ('107d') + - Avermedia Technologies Inc ('1461') + - Chaintech Computer Co. Ltd ('270f') + - iTuner ('aa07') + - iTuner ('aa0a') + - Microtune, Inc. ('1851') + - iTuner ('aa01') + - iTuner ('aa04') + - iTuner ('aa06') + - iTuner ('aa0f') + - iTuner ('aa02') + - iTuner ('aa0b') + - Pinnacle Systems, Inc. (Wrong ID) ('bd11') + - Rockwell International ('127a') + - Askey Computer Corp. ('144f') + - Twinhan Technology Co. Ltd ('1822') + - Anritsu Corp. ('1852') + - iTuner ('aa08') + - Hauppauge computer works Inc. ('0070') + - Pinnacle Systems Inc. ('11bd') + - Conexant Systems, Inc. ('14f1') + - iTuner ('aa09') + - iTuner ('aa03') + - iTuner ('aa0c') + - iTuner ('aa0e') +``` + +## Developers + +Contributions to `pcidb` are welcomed! Fork the repo on GitHub and submit a pull +request with your proposed changes. Or, feel free to log an issue for a feature +request or bug report. + +### Running tests + +You can run unit tests easily using the `make test` command, like so: + + +``` +[jaypipes@uberbox pcidb]$ make test +go test github.com/jaypipes/pcidb +ok github.com/jaypipes/pcidb 0.045s +``` diff --git a/vendor/github.com/jaypipes/pcidb/context.go b/vendor/github.com/jaypipes/pcidb/context.go new file mode 100644 index 00000000..da345996 --- /dev/null +++ b/vendor/github.com/jaypipes/pcidb/context.go @@ -0,0 +1,86 @@ +package pcidb + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + homedir "github.com/mitchellh/go-homedir" +) + +// Concrete merged set of configuration switches that get passed to pcidb +// internal functions +type context struct { + chroot string + cacheOnly bool + cachePath string + path string + enableNetworkFetch bool + searchPaths []string +} + +func contextFromOptions(merged *WithOption) *context { + ctx := &context{ + chroot: *merged.Chroot, + cacheOnly: *merged.CacheOnly, + cachePath: getCachePath(), + enableNetworkFetch: *merged.EnableNetworkFetch, + path: *merged.Path, + searchPaths: make([]string, 0), + } + ctx.setSearchPaths() + return ctx +} + +func getCachePath() string { + hdir, err := homedir.Dir() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed getting homedir.Dir(): %v", err) + return "" + } + fp, err := homedir.Expand(filepath.Join(hdir, ".cache", "pci.ids")) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed expanding local cache path: %v", err) + return "" + } + return fp +} + +// Depending on the operating system, sets the context's searchPaths to a set +// of local filepaths to search for a pci.ids database file +func (ctx *context) setSearchPaths() { + // Look in direct path first, if set + if ctx.path != "" { + ctx.searchPaths = append(ctx.searchPaths, ctx.path) + return + } + // A set of filepaths we will first try to search for the pci-ids DB file + // on the local machine. 
If we fail to find one, we'll try pulling the
+	// latest pci-ids file from the network
+	ctx.searchPaths = append(ctx.searchPaths, ctx.cachePath)
+	if ctx.cacheOnly {
+		return
+	}
+
+	rootPath := ctx.chroot
+
+	if runtime.GOOS != "windows" {
+		ctx.searchPaths = append(
+			ctx.searchPaths,
+			filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids"),
+		)
+		ctx.searchPaths = append(
+			ctx.searchPaths,
+			filepath.Join(rootPath, "usr", "share", "misc", "pci.ids"),
+		)
+		ctx.searchPaths = append(
+			ctx.searchPaths,
+			filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids.gz"),
+		)
+		ctx.searchPaths = append(
+			ctx.searchPaths,
+			filepath.Join(rootPath, "usr", "share", "misc", "pci.ids.gz"),
+		)
+	}
+}
diff --git a/vendor/github.com/jaypipes/pcidb/discover.go b/vendor/github.com/jaypipes/pcidb/discover.go
new file mode 100644
index 00000000..b0452d7d
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/discover.go
@@ -0,0 +1,111 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+	"bufio"
+	"compress/gzip"
+	"io"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+const (
+	PCIIDS_URI = "https://pci-ids.ucw.cz/v2.2/pci.ids.gz"
+	USER_AGENT = "golang-jaypipes-pcidb"
+)
+
+func (db *PCIDB) load(ctx *context) error {
+	var foundPath string
+	for _, fp := range ctx.searchPaths {
+		if _, err := os.Stat(fp); err == nil {
+			foundPath = fp
+			break
+		}
+	}
+	if foundPath == "" {
+		if !ctx.enableNetworkFetch {
+			return ERR_NO_DB
+		}
+		// OK, so we didn't find any host-local copy of the pci-ids DB file.
+		// Let's try fetching it from the network and storing it
+		if err := cacheDBFile(ctx.cachePath); err != nil {
+			return err
+		}
+		foundPath = ctx.cachePath
+	}
+	f, err := os.Open(foundPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	var scanner *bufio.Scanner
+	if strings.HasSuffix(foundPath, ".gz") {
+		var zipReader *gzip.Reader
+		if zipReader, err = gzip.NewReader(f); err != nil {
+			return err
+		}
+		defer zipReader.Close()
+		scanner = bufio.NewScanner(zipReader)
+	} else {
+		scanner = bufio.NewScanner(f)
+	}
+
+	return parseDBFile(db, scanner)
+}
+
+func ensureDir(fp string) error {
+	fpDir := filepath.Dir(fp)
+	if _, err := os.Stat(fpDir); os.IsNotExist(err) {
+		err = os.MkdirAll(fpDir, os.ModePerm)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Pulls down the latest copy of the pci-ids file from the network and stores
+// it in the local host filesystem
+func cacheDBFile(cacheFilePath string) error {
+	if err := ensureDir(cacheFilePath); err != nil {
+		return err
+	}
+
+	client := new(http.Client)
+	request, err := http.NewRequest("GET", PCIIDS_URI, nil)
+	if err != nil {
+		return err
+	}
+	request.Header.Set("User-Agent", USER_AGENT)
+	response, err := client.Do(request)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+	f, err := os.Create(cacheFilePath)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			os.Remove(cacheFilePath)
+		}
+	}()
+	defer f.Close()
+	// write the gunzipped contents to our local cache file
+	zr, err := gzip.NewReader(response.Body)
+	if err != nil {
+		return err
+	}
+	defer zr.Close()
+	if _, err = io.Copy(f, zr); err != nil {
+		return err
+	}
+	return err
+}
diff --git a/vendor/github.com/jaypipes/pcidb/main.go b/vendor/github.com/jaypipes/pcidb/main.go
new file mode 100644
index 00000000..d518748e
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/main.go
@@ -0,0 +1,196 @@
+//
+// Use and distribution
licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+)
+
+var (
+	ERR_NO_DB = fmt.Errorf("No pci-ids DB files found (and network fetch disabled)")
+	trueVar   = true
+)
+
+// ProgrammingInterface is the PCI programming interface for a class of PCI
+// devices
+type ProgrammingInterface struct {
+	// hex-encoded PCI_ID of the programming interface
+	ID string `json:"id"`
+	// common string name for the programming interface
+	Name string `json:"name"`
+}
+
+// Subclass is a subdivision of a PCI class
+type Subclass struct {
+	// hex-encoded PCI_ID for the device subclass
+	ID string `json:"id"`
+	// common string name for the subclass
+	Name string `json:"name"`
+	// any programming interfaces this subclass might have
+	ProgrammingInterfaces []*ProgrammingInterface `json:"programming_interfaces"`
+}
+
+// Class is the PCI class
+type Class struct {
+	// hex-encoded PCI_ID for the device class
+	ID string `json:"id"`
+	// common string name for the class
+	Name string `json:"name"`
+	// any subclasses belonging to this class
+	Subclasses []*Subclass `json:"subclasses"`
+}
+
+// Product provides information about a PCI device model
+// NOTE(jaypipes): In the hardware world, the PCI "device_id" is the identifier
+// for the product/model
+type Product struct {
+	// vendor ID for the product
+	VendorID string `json:"vendor_id"`
+	// hex-encoded PCI_ID for the product/model
+	ID string `json:"id"`
+	// common string name of the product/model
+	Name string `json:"name"`
+	// "subdevices" or "subsystems" for the product
+	Subsystems []*Product `json:"subsystems"`
+}
+
+// Vendor provides information about a device vendor
+type Vendor struct {
+	// hex-encoded PCI_ID for the vendor
+	ID string `json:"id"`
+	// common string name of the vendor
+	Name string `json:"name"`
+	// all top-level devices for the vendor
+	Products []*Product `json:"products"`
+}
+
+type PCIDB struct {
+	// hash of class ID -> class information
+	Classes map[string]*Class `json:"classes"`
+	// hash of vendor ID -> vendor information
+	Vendors map[string]*Vendor `json:"vendors"`
+	// hash of vendor ID + product/device ID -> product information
+	Products map[string]*Product `json:"products"`
+}
+
+// WithOption is used to represent optionally-configured settings
+type WithOption struct {
+	// Chroot is the directory that pcidb uses when attempting to discover
+	// pciids DB files
+	Chroot *string
+	// CacheOnly is mostly just useful for testing. It essentially disables
+	// looking for any non-~/.cache/pci.ids filepaths (which is useful when
+	// we want to test the fetch-from-network code paths).
+	CacheOnly *bool
+	// EnableNetworkFetch enables fetching a pci-ids DB file from a known
+	// location on the network if no local pci-ids DB files can be found.
+	EnableNetworkFetch *bool
+	// Path points to the absolute path of a pci.ids file in a non-standard
+	// location.
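+	// The path may also be supplied via the PCIDB_PATH environment
+	// variable (see mergeOptions below).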
+ Path *string +} + +func WithChroot(dir string) *WithOption { + return &WithOption{Chroot: &dir} +} + +func WithCacheOnly() *WithOption { + return &WithOption{CacheOnly: &trueVar} +} + +func WithDirectPath(path string) *WithOption { + return &WithOption{Path: &path} +} + +func WithEnableNetworkFetch() *WithOption { + return &WithOption{EnableNetworkFetch: &trueVar} +} + +func mergeOptions(opts ...*WithOption) *WithOption { + // Grab options from the environs by default + defaultChroot := "/" + if val, exists := os.LookupEnv("PCIDB_CHROOT"); exists { + defaultChroot = val + } + path := "" + if val, exists := os.LookupEnv("PCIDB_PATH"); exists { + path = val + } + defaultCacheOnly := false + if val, exists := os.LookupEnv("PCIDB_CACHE_ONLY"); exists { + if parsed, err := strconv.ParseBool(val); err != nil { + fmt.Fprintf( + os.Stderr, + "Failed parsing a bool from PCIDB_CACHE_ONLY "+ + "environ value of %s", + val, + ) + } else if parsed { + defaultCacheOnly = parsed + } + } + defaultEnableNetworkFetch := false + if val, exists := os.LookupEnv("PCIDB_ENABLE_NETWORK_FETCH"); exists { + if parsed, err := strconv.ParseBool(val); err != nil { + fmt.Fprintf( + os.Stderr, + "Failed parsing a bool from PCIDB_ENABLE_NETWORK_FETCH "+ + "environ value of %s", + val, + ) + } else if parsed { + defaultEnableNetworkFetch = parsed + } + } + + merged := &WithOption{} + for _, opt := range opts { + if opt.Chroot != nil { + merged.Chroot = opt.Chroot + } + if opt.CacheOnly != nil { + merged.CacheOnly = opt.CacheOnly + } + if opt.EnableNetworkFetch != nil { + merged.EnableNetworkFetch = opt.EnableNetworkFetch + } + if opt.Path != nil { + merged.Path = opt.Path + } + } + // Set the default value if missing from merged + if merged.Chroot == nil { + merged.Chroot = &defaultChroot + } + if merged.CacheOnly == nil { + merged.CacheOnly = &defaultCacheOnly + } + if merged.EnableNetworkFetch == nil { + merged.EnableNetworkFetch = &defaultEnableNetworkFetch + } + if merged.Path == nil { + merged.Path = &path + } + return merged +} + +// New returns a pointer to a PCIDB struct which contains information you can +// use to query PCI vendor, product and class information. It accepts zero or +// more pointers to WithOption structs. If you want to modify the behaviour of +// pcidb, use one of the option modifiers when calling New. For example, to +// change the root directory that pcidb uses when discovering pciids DB files, +// call New(WithChroot("/my/root/override")) +func New(opts ...*WithOption) (*PCIDB, error) { + ctx := contextFromOptions(mergeOptions(opts...)) + db := &PCIDB{} + if err := db.load(ctx); err != nil { + return nil, err + } + return db, nil +} diff --git a/vendor/github.com/jaypipes/pcidb/parse.go b/vendor/github.com/jaypipes/pcidb/parse.go new file mode 100644 index 00000000..0fee5fe5 --- /dev/null +++ b/vendor/github.com/jaypipes/pcidb/parse.go @@ -0,0 +1,163 @@ +// +// Use and distribution licensed under the Apache license version 2. +// +// See the COPYING file in the root project directory for full text. 
+//
+
+package pcidb
+
+import (
+	"bufio"
+	"strings"
+)
+
+func parseDBFile(db *PCIDB, scanner *bufio.Scanner) error {
+	inClassBlock := false
+	db.Classes = make(map[string]*Class, 20)
+	db.Vendors = make(map[string]*Vendor, 200)
+	db.Products = make(map[string]*Product, 1000)
+	subclasses := make([]*Subclass, 0)
+	progIfaces := make([]*ProgrammingInterface, 0)
+	var curClass *Class
+	var curSubclass *Subclass
+	var curProgIface *ProgrammingInterface
+	vendorProducts := make([]*Product, 0)
+	var curVendor *Vendor
+	var curProduct *Product
+	var curSubsystem *Product
+	productSubsystems := make([]*Product, 0)
+	for scanner.Scan() {
+		line := scanner.Text()
+		// skip comments and blank lines
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+		lineBytes := []rune(line)
+
+		// Lines starting with an uppercase "C" indicate a PCI top-level class
+		// information block. These lines look like this:
+		//
+		// C 02 Network controller
+		if lineBytes[0] == 'C' {
+			if curClass != nil {
+				// finalize existing class because we found a new class block
+				curClass.Subclasses = subclasses
+				subclasses = make([]*Subclass, 0)
+			}
+			inClassBlock = true
+			classID := string(lineBytes[2:4])
+			className := string(lineBytes[6:])
+			curClass = &Class{
+				ID:         classID,
+				Name:       className,
+				Subclasses: subclasses,
+			}
+			db.Classes[curClass.ID] = curClass
+			continue
+		}
+
+		// Lines not beginning with an uppercase "C" or a TAB character
+		// indicate a top-level vendor information block. These lines look like
+		// this:
+		//
+		// 0a89 BREA Technologies Inc
+		if lineBytes[0] != '\t' {
+			if curVendor != nil {
+				// finalize existing vendor because we found a new vendor block
+				curVendor.Products = vendorProducts
+				vendorProducts = make([]*Product, 0)
+			}
+			inClassBlock = false
+			vendorID := string(lineBytes[0:4])
+			vendorName := string(lineBytes[6:])
+			curVendor = &Vendor{
+				ID:       vendorID,
+				Name:     vendorName,
+				Products: vendorProducts,
+			}
+			db.Vendors[curVendor.ID] = curVendor
+			continue
+		}
+
+		// Lines beginning with only a single TAB character are *either* a
+		// subclass OR are a device information block. If we're in a class
+		// block (i.e. the last parsed block header was for a PCI class), then
+		// we parse a subclass block. Otherwise, we parse a device information
+		// block.
+		//
+		// A subclass information block looks like this:
+		//
+		// \t00 Non-VGA unclassified device
+		//
+		// A device information block looks like this:
+		//
+		// \t0002 PCI to MCA Bridge
+		if len(lineBytes) > 1 && lineBytes[1] != '\t' {
+			if inClassBlock {
+				if curSubclass != nil {
+					// finalize existing subclass because we found a new subclass block
+					curSubclass.ProgrammingInterfaces = progIfaces
+					progIfaces = make([]*ProgrammingInterface, 0)
+				}
+				subclassID := string(lineBytes[1:3])
+				subclassName := string(lineBytes[5:])
+				curSubclass = &Subclass{
+					ID:                    subclassID,
+					Name:                  subclassName,
+					ProgrammingInterfaces: progIfaces,
+				}
+				subclasses = append(subclasses, curSubclass)
+			} else {
+				if curProduct != nil {
+					// finalize existing product because we found a new product block
+					curProduct.Subsystems = productSubsystems
+					productSubsystems = make([]*Product, 0)
+				}
+				productID := string(lineBytes[1:5])
+				productName := string(lineBytes[7:])
+				productKey := curVendor.ID + productID
+				curProduct = &Product{
+					VendorID: curVendor.ID,
+					ID:       productID,
+					Name:     productName,
+				}
+				vendorProducts = append(vendorProducts, curProduct)
+				db.Products[productKey] = curProduct
+			}
+		} else {
+			// Lines beginning with two TAB characters are *either* a subsystem
+			// (subdevice) OR are a programming interface for a PCI device
+			// subclass. If we're in a class block (i.e. the last parsed block
+			// header was for a PCI class), then we parse a programming
+			// interface block, otherwise we parse a subsystem block.
+			//
+			// A programming interface block looks like this:
+			//
+			// \t\t00 UHCI
+			//
+			// A subsystem block looks like this:
+			//
+			// \t\t0e11 4091 Smart Array 6i
+			if inClassBlock {
+				progIfaceID := string(lineBytes[2:4])
+				progIfaceName := string(lineBytes[6:])
+				curProgIface = &ProgrammingInterface{
+					ID:   progIfaceID,
+					Name: progIfaceName,
+				}
+				progIfaces = append(progIfaces, curProgIface)
+			} else {
+				vendorID := string(lineBytes[2:6])
+				subsystemID := string(lineBytes[7:11])
+				subsystemName := string(lineBytes[13:])
+				curSubsystem = &Product{
+					VendorID: vendorID,
+					ID:       subsystemID,
+					Name:     subsystemName,
+				}
+				productSubsystems = append(productSubsystems, curSubsystem)
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml
new file mode 100644
index 00000000..ebd5edd8
--- /dev/null
+++ b/vendor/github.com/mattn/go-shellwords/.travis.yml
@@ -0,0 +1,16 @@
+arch:
+  - amd64
+  - ppc64le
+language: go
+sudo: false
+go:
+  - tip
+
+before_install:
+  - go get -t -v ./...
+ +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 00000000..740fa931 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 00000000..bdd53191 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,55 @@ +# go-shellwords + +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) +[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") +// envs should be ["FOO=foo", "BAR=baz"] +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). + +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 00000000..a7deaca9 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... 
| grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 00000000..1b42a001 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,317 @@ +package shellwords + +import ( + "bytes" + "errors" + "os" + "strings" + "unicode" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + var buf bytes.Buffer + rs := []rune(s) + for i := 0; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + break + } + buf.WriteRune(rs[i]) + continue + } else if r == '$' { + i++ + if i == len(rs) { + buf.WriteRune(r) + break + } + if rs[i] == 0x7b { + i++ + p := i + for ; i < len(rs); i++ { + r = rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) { + break + } + } + if r != 0x7d { + return s + } + if i > p { + buf.WriteString(getenv(s[p:i])) + } + } else { + p := i + for ; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) { + break + } + } + if i > p { + buf.WriteString(getenv(s[p:i])) + i-- + } else { + buf.WriteString(s[p:]) + } + } + } else { + buf.WriteRune(r) + } + } + return buf.String() +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + Dir string + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + Dir: "", + } +} + +type argType int + +const ( + argNo argType = iota + argSingle + argQuoted +) + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := argNo + + i := -1 +loop: + for _, r := range line { + i++ + if escaped { + buf += string(r) + escaped = false + got = argSingle + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + buf = "" + got = argNo + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)] + out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)-2] + out + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + if doubleQuoted { + got = argQuoted + } + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + if singleQuoted { + got = argQuoted + } + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = argNo + } + } + pos = i + break loop + } + } + + got = argSingle + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { + _args, err := p.Parse(line) + if err != nil { + return nil, nil, err + } + envs = []string{} + args = []string{} + parsingEnv := true + for _, arg := range _args { + if parsingEnv && isEnv(arg) { + envs = append(envs, arg) + } else { + if parsingEnv { + parsingEnv = false + } + args = append(args, arg) + } + } + return envs, args, nil +} + +func isEnv(arg string) bool { + return len(strings.Split(arg, "=")) == 2 +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} + +func ParseWithEnvs(line string) (envs []string, args []string, err error) { + return NewParser().ParseWithEnvs(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 00000000..b56a9012 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,29 @@ +// +build !windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("SHELL"); shell == "" { + shell = "/bin/sh" + } + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 00000000..fd738a72 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("COMSPEC"); shell == "" { + shell = "cmd" + } + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/moby/locker/LICENSE b/vendor/github.com/moby/locker/LICENSE new file mode 100644 index 00000000..2e0ec1dc --- /dev/null +++ b/vendor/github.com/moby/locker/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/locker/README.md b/vendor/github.com/moby/locker/README.md new file mode 100644 index 00000000..a0852f0f --- /dev/null +++ b/vendor/github.com/moby/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. 
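A minimal sketch of the name-based locking pattern described here, using only the identifiers defined in locker.go below (`New`, `Lock`, `Unlock`, `ErrNoSuchLock`); the lock names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/moby/locker"
)

func main() {
	locks := locker.New()

	// The first Lock on a name lazily creates the underlying mutex.
	locks.Lock("volume-abc123")

	// ... work exclusively with the "volume-abc123" resource ...

	// Unlike sync.Mutex, Unlock returns an error, so it is worth checking.
	if err := locks.Unlock("volume-abc123"); err != nil {
		fmt.Println("unlock:", err)
	}

	// Unlocking a name that was never locked yields ErrNoSuchLock.
	if err := locks.Unlock("never-locked"); err == locker.ErrNoSuchLock {
		fmt.Println("unlock:", err)
	}
}
```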
+Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. + + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/moby/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/moby/locker/locker.go b/vendor/github.com/moby/locker/locker.go new file mode 100644 index 00000000..0b22ddfa --- /dev/null +++ b/vendor/github.com/moby/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md new file mode 100644 index 00000000..bc60aa60 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.21.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.21.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.21.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go new file mode 100644 index 00000000..a9a15a1d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/attribute_group.go @@ -0,0 +1,1866 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - unix domain + // socket name, IPv4 or IPv6 address. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/tmp/my.sock', '10.1.2.80' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent client address behind + // any intermediaries (e.g. proxies) if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. 
It represents the client port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent client port behind any + // intermediaries (e.g. proxies) if it's available. + ClientPortKey = attribute.Key("client.port") + + // ClientSocketAddressKey is the attribute Key conforming to the + // "client.socket.address" semantic conventions. It represents the + // immediate client peer address - unix domain socket name, IPv4 or IPv6 + // address. + // + // Type: string + // RequirementLevel: Recommended (If different than `client.address`.) + // Stability: stable + // Examples: '/tmp/my.sock', '127.0.0.1' + ClientSocketAddressKey = attribute.Key("client.socket.address") + + // ClientSocketPortKey is the attribute Key conforming to the + // "client.socket.port" semantic conventions. It represents the immediate + // client peer port number + // + // Type: int + // RequirementLevel: Recommended (If different than `client.port`.) + // Stability: stable + // Examples: 35555 + ClientSocketPortKey = attribute.Key("client.socket.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// unix domain socket name, IPv4 or IPv6 address. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// ClientSocketAddress returns an attribute KeyValue conforming to the +// "client.socket.address" semantic conventions. It represents the immediate +// client peer address - unix domain socket name, IPv4 or IPv6 address. +func ClientSocketAddress(val string) attribute.KeyValue { + return ClientSocketAddressKey.String(val) +} + +// ClientSocketPort returns an attribute KeyValue conforming to the +// "client.socket.port" semantic conventions. It represents the immediate +// client peer port number +func ClientSocketPort(val int) attribute.KeyValue { + return ClientSocketPortKey.Int(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the deprecated, use + // `http.request.method` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the deprecated, + // use `http.response.status_code` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the deprecated, use `url.scheme` + // instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the deprecated, use `url.full` instead. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + HTTPURLKey = attribute.Key("http.url") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the deprecated, use `url.path` and + // `url.query` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // deprecated, use `http.request.body.size` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // deprecated, use `http.response.body.size` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the deprecated, use +// `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the deprecated, use +// `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the deprecated, use `url.scheme` +// instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the deprecated, use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the deprecated, use `url.path` and +// `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the +// deprecated, use `http.request.body.size` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the +// deprecated, use `http.response.body.size` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the deprecated, + // use `server.socket.domain` on client spans. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the deprecated, + // use `server.socket.address` on client spans and `client.socket.address` + // on server spans. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the deprecated, + // use `server.socket.port` on client spans and `client.socket.port` on + // server spans. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the deprecated, use `server.address` + // on client spans and `client.address` on server spans. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the deprecated, use `server.port` on + // client spans and `client.port` on server spans. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the deprecated, use + // `server.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the deprecated, use `server.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the deprecated, + // use `server.socket.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the deprecated, + // use `server.socket.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the deprecated, use + // `network.transport`. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + NetTransportKey = attribute.Key("net.transport") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the deprecated, + // use `network.protocol.name`. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the + // deprecated, use `network.protocol.version`. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the deprecated, + // use `network.transport` and `network.type`. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + NetSockFamilyKey = attribute.Key("net.sock.family") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the deprecated, use +// `server.socket.domain` on client spans. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use +// `server.socket.address` on client spans and `client.socket.address` on +// server spans. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the deprecated, use +// `server.socket.port` on client spans and `client.socket.port` on server +// spans. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the deprecated, use +// `server.address` on client spans and `client.address` on server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the deprecated, use +// `server.port` on client spans and `client.port` on server spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the deprecated, use +// `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the deprecated, use +// `server.port`. 
+func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the deprecated, use +// `server.socket.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the deprecated, use +// `server.socket.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the deprecated, use +// `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the deprecated, +// use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API does not expose a clear notion +// of client and server. +const ( + // DestinationDomainKey is the attribute Key conforming to the + // "destination.domain" semantic conventions. It represents the domain name + // of the destination system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'foo.example.com' + // Note: This value may be a host name, a fully qualified domain name, or + // another host naming format. + DestinationDomainKey = attribute.Key("destination.domain") + + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the peer + // address, for example IP address or UNIX socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.5.3.2' + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the peer port + // number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationDomain returns an attribute KeyValue conforming to the +// "destination.domain" semantic conventions. It represents the domain name of +// the destination system. +func DestinationDomain(val string) attribute.KeyValue { + return DestinationDomainKey.String(val) +} + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the peer address, +// for example IP address or UNIX socket name. 
+func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the peer port number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes HTTP attributes. +const ( + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the hTTP + // request method. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER` and, except if reporting + // a metric, MUST + // set the exact method received in the request line as value of the + // `http.request.method_original` attribute. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) 
+ // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTP Server attributes +const ( + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. 
+ EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // TypeKey is the attribute Key conforming to the "type" semantic + // conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + TypeKey = attribute.Key("type") + + // PoolKey is the attribute Key conforming to the "pool" semantic + // conventions. It represents the name of the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + PoolKey = attribute.Key("pool") +) + +var ( + // Heap memory + TypeHeap = TypeKey.String("heap") + // Non-heap memory + TypeNonHeap = TypeKey.String("non_heap") +) + +// Pool returns an attribute KeyValue conforming to the "pool" semantic +// conventions. It represents the name of the memory pool. +func Pool(val string) attribute.KeyValue { + return PoolKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API does not expose a +// clear notion of client and server). This also covers UDP network +// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) +// and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the logical server hostname, matches + // server FQDN if available, and IP or socket address if FQDN is not known. 
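+	// In contrast to `server.socket.address` below, this is the logical
+	// name or address the client intended to connect to, not the peer
+	// address observed on the socket.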
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the logical server port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + ServerPortKey = attribute.Key("server.port") + + // ServerSocketDomainKey is the attribute Key conforming to the + // "server.socket.domain" semantic conventions. It represents the domain + // name of an immediate peer. + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: 'proxy.example.com' + // Note: Typically observed from the client side, and represents a proxy or + // other intermediary domain name. + ServerSocketDomainKey = attribute.Key("server.socket.domain") + + // ServerSocketAddressKey is the attribute Key conforming to the + // "server.socket.address" semantic conventions. It represents the physical + // server IP address or Unix socket address. If set from the client, should + // simply use the socket's peer address, and not attempt to find any actual + // server IP (i.e., if set from client, this may represent some proxy + // server instead of the logical server). + // + // Type: string + // RequirementLevel: Recommended (If different than `server.address`.) + // Stability: stable + // Examples: '10.5.3.2' + ServerSocketAddressKey = attribute.Key("server.socket.address") + + // ServerSocketPortKey is the attribute Key conforming to the + // "server.socket.port" semantic conventions. It represents the physical + // server port. + // + // Type: int + // RequirementLevel: Recommended (If different than `server.port`.) + // Stability: stable + // Examples: 16456 + ServerSocketPortKey = attribute.Key("server.socket.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the logical server +// hostname, matches server FQDN if available, and IP or socket address if FQDN +// is not known. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the logical server port number +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// ServerSocketDomain returns an attribute KeyValue conforming to the +// "server.socket.domain" semantic conventions. It represents the domain name +// of an immediate peer. +func ServerSocketDomain(val string) attribute.KeyValue { + return ServerSocketDomainKey.String(val) +} + +// ServerSocketAddress returns an attribute KeyValue conforming to the +// "server.socket.address" semantic conventions. It represents the physical +// server IP address or Unix socket address. If set from the client, should +// simply use the socket's peer address, and not attempt to find any actual +// server IP (i.e., if set from client, this may represent some proxy server +// instead of the logical server). +func ServerSocketAddress(val string) attribute.KeyValue { + return ServerSocketAddressKey.String(val) +} + +// ServerSocketPort returns an attribute KeyValue conforming to the +// "server.socket.port" semantic conventions. It represents the physical server +// port. 
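+// A client-side span might, illustratively, pair the logical and physical
+// endpoints using the attribute constructors above:
+//
+//	span.SetAttributes(
+//		ServerAddress("example.com"),
+//		ServerSocketAddress("10.5.3.2"),
+//		ServerSocketPort(16456),
+//	)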
+func ServerSocketPort(val int) attribute.KeyValue { + return ServerSocketPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API does not expose a clear notion +// of client and server. +const ( + // SourceDomainKey is the attribute Key conforming to the "source.domain" + // semantic conventions. It represents the domain name of the source + // system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'foo.example.com' + // Note: This value may be a host name, a fully qualified domain name, or + // another host naming format. + SourceDomainKey = attribute.Key("source.domain") + + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address, for example IP + // address or Unix socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.5.3.2' + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceDomain returns an attribute KeyValue conforming to the +// "source.domain" semantic conventions. It represents the domain name of the +// source system. +func SourceDomain(val string) attribute.KeyValue { + return SourceDomainKey.String(val) +} + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address, for +// example IP address or Unix socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // Transport Layer](https://osi-model.com/transport-layer/) or + // [Inter-process Communication + // method](https://en.wikipedia.org/wiki/Inter-process_communication). The + // value SHOULD be normalized to lowercase. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI Network + // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The + // value SHOULD be normalized to lowercase. 
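+	// For example, an HTTP request served over IPv4 would typically be
+	// described with `network.transport` = 'tcp' and `network.type` = 'ipv4'.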
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ipv4', 'ipv6'
+	NetworkTypeKey = attribute.Key("network.type")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the application layer protocol used. See note below.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client used has a version of `0.27.2`, but sends HTTP version
+	// `1.1`, this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe. See note below
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// Application Layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent. The value SHOULD be normalized to lowercase.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions.
It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's different + // than `http.request.method`.) + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. 
This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") +) + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. 
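+	// When the compressed size is known separately, it belongs in
+	// `messaging.message.payload_compressed_size_bytes` below instead.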
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. 
+ // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. 
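+// For example, MessagingRabbitmqDestinationRoutingKey("myKey") records the
+// routing key from the Examples above.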
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message keys in Kafka, which are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
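+// For example, a consumer span might carry
+// MessagingKafkaConsumerGroup("my-group") alongside the per-message Kafka
+// attributes above.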
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to the consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for a delay message,
+	// which determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group, which is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: stable
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of the message besides the topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of the message, another way to mark the message besides the
+	// message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
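+// For example, MessagingRocketmqClientGroup("myConsumerGroup") would be set
+// on a consumer span, with the client type inferred from the SpanKind.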
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark the message besides the
+// message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it should be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password should be redacted and attribute's value should be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed) and SHOULD NOT be validated or modified except for + // sanitizing purposes. + URLFullKey = attribute.Key("url.full") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. It represents the [URI + // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/search' + // Note: When missing, the value is assumed to be `/` + URLPathKey = attribute.Key("url.path") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI + // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in query string SHOULD be scrubbed when + // instrumentations can identify it. + URLQueryKey = attribute.Key("url.query") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") +) + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" +// semantic conventions. It represents the absolute URL describing a network +// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" +// semantic conventions. It represents the [URI +// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" +// semantic conventions. It represents the [URI +// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the +// "url.fragment" semantic conventions. It represents the [URI +// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. 
It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
new file mode 100644
index 00000000..461331a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
new file mode 100644
index 00000000..c09d9317
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
new file mode 100644
index 00000000..5184ee33
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
new file mode 100644
index 00000000..f7aaa50b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/resource.go
@@ -0,0 +1,2299 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions.
It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. 
It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + CloudResourceIDKey = attribute.Key("cloud.resource_id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the + // availability zone where the resource is running. Cloud regions often + // have multiple, isolated locations known as zones to increase + // availability. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + 
CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running in. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the +// availability zone where the resource is running. Cloud regions often have +// multiple, isolated locations known as zones to increase availability. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
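Editor's sketch (not part of the vendored file): the cloud.* helpers above are typically fed into an SDK resource. A minimal, hedged illustration assuming the package's SchemaURL constant and the go.opentelemetry.io/otel/sdk/resource API; the literal values echo the Examples in the doc comments and would normally come from a resource detector.

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    // newCloudResource builds a resource describing an AWS-hosted workload.
    func newCloudResource() *resource.Resource {
        return resource.NewWithAttributes(
            semconv.SchemaURL,                           // schema URL for semconv v1.21.0
            semconv.CloudProviderAWS,                    // cloud.provider = "aws"
            semconv.CloudPlatformAWSEC2,                 // cloud.platform = "aws_ec2"
            semconv.CloudRegion("us-east-1"),
            semconv.CloudAvailabilityZone("us-east-1c"), // example value from above
            semconv.CloudAccountID("111111111111"),
        )
    }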
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
+const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). 
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") + + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). 
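Editor's sketch (not part of the vendored file): deriving the gcp.cloud_run.job.* attributes from the CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX environment variables that the doc comments above reference; cloudRunJobAttrs is a hypothetical helper name.

    import (
        "os"
        "strconv"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    func cloudRunJobAttrs() []attribute.KeyValue {
        attrs := []attribute.KeyValue{
            semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
        }
        // CLOUD_RUN_TASK_INDEX is a decimal string, e.g. "0".
        if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
            attrs = append(attrs, semconv.GCPCloudRunJobTaskIndex(idx))
        }
        return attrs
    }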
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") +) + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. 
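Editor's sketch (not part of the vendored file): populating the heroku.* attributes above. The HEROKU_* environment variable names are an assumption here; they are only populated when Heroku's dyno metadata feature is enabled for the app.

    import (
        "os"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    func herokuAttrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.HerokuAppID(os.Getenv("HEROKU_APP_ID")),                           // assumed env var
            semconv.HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),              // assumed env var
            semconv.HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")), // assumed env var
        }
    }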
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // OCI defines a digest of manifest. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string representing the full + // command. 
[2] + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string representing the full +// command. [2] +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) run by the +// container. [2] +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// The software deployment.
+const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. 
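Editor's sketch (not part of the vendored file): combining the DeploymentEnvironment helper above with the container.* helpers from the previous section; the literal values simply reuse the Examples from the doc comments.

    import (
        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    // exampleResourceAttrs is a hypothetical helper returning illustrative values.
    func exampleResourceAttrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.DeploymentEnvironment("staging"),        // deployment.environment
            semconv.ContainerName("opentelemetry-autoconf"), // container.name
            semconv.ContainerID("a3bf90e006b2"),             // container.id (abbreviated)
            semconv.ContainerRuntime("docker"),              // container.runtime
            semconv.ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
        }
    }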
It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). 
+ // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. 
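Editor's sketch (not part of the vendored file): the faas.* helpers on AWS Lambda, following the notes above (faas.instance is the full log stream name; AWS_LAMBDA_FUNCTION_MEMORY_SIZE is in MiB and must be multiplied by 1,048,576). lambdaFaaSAttrs is a hypothetical helper name.

    import (
        "os"
        "strconv"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    func lambdaFaaSAttrs() []attribute.KeyValue {
        attrs := []attribute.KeyValue{
            semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
            semconv.FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
            semconv.FaaSInstance(os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME")),
        }
        if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
            attrs = append(attrs, semconv.FaaSMaxMemory(mib*1048576)) // MiB -> bytes
        }
        return attrs
    }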
+ // // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host.
On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S does not have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions.
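Editor's sketch (not part of the vendored file): a hedged host.* helper. Sourcing host.id (e.g. /etc/machine-id on Linux) is platform-specific and omitted here, and the runtime.GOARCH mapping is deliberately incomplete.

    import (
        "os"
        "runtime"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    func hostAttrs() []attribute.KeyValue {
        var attrs []attribute.KeyValue
        if name, err := os.Hostname(); err == nil {
            attrs = append(attrs, semconv.HostName(name))
        }
        switch runtime.GOARCH { // partial mapping to the host.arch enum above
        case "amd64":
            attrs = append(attrs, semconv.HostArchAMD64)
        case "arm64":
            attrs = append(attrs, semconv.HostArchARM64)
        }
        return attrs
    }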
It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). 
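Editor's sketch (not part of the vendored file): filling the k8s.* attributes above from environment variables. The K8S_* variable names are an assumption; they would typically be wired up via the Kubernetes downward API in the pod spec.

    import (
        "os"

        "go.opentelemetry.io/otel/attribute"
        semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    )

    func k8sAttrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            semconv.K8SClusterName(os.Getenv("K8S_CLUSTER_NAME")),     // assumed env var
            semconv.K8SNodeName(os.Getenv("K8S_NODE_NAME")),           // assumed env var
            semconv.K8SNamespaceName(os.Getenv("K8S_NAMESPACE_NAME")), // assumed env var
            semconv.K8SPodName(os.Getenv("K8S_POD_NAME")),             // assumed env var
            semconv.K8SPodUID(os.Getenv("K8S_POD_UID")),               // assumed env var
        }
    }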
+const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. 
+func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. 
It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`.
On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions.
It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. 
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. 
It is deprecated; + // use the `otel.scope.name` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It is + // deprecated; use the `otel.scope.version` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It is deprecated; use +// the `otel.scope.name` attribute instead. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It is deprecated; +// use the `otel.scope.version` attribute instead. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go new file mode 100644 index 00000000..be07217d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go new file mode 100644 index 00000000..55698cc4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/trace.go @@ -0,0 +1,2484 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` response, where applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` response, where applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used.
+const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), + // which uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), + // which identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), + // which contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), +// which uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), +// which identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions.
It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), +// which contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Recommended (Should be collected by default only if + // there is sanitization that excludes sensitive information.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only.
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBConnectionString returns an 
attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server
const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. 
+ // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the request units (RU) + // consumed for that operation. + // + // Type: double + // RequirementLevel: ConditionallyRequired (when available) + // Stability: stable + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions.
It represents the request units (RU) +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that the corresponding incoming span would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
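A minimal usage sketch may help here: the FaaS helpers above are ordinary attribute.KeyValue constructors, so an instrumented handler passes them straight to Span.SetAttributes. The package name, tracer name, and the semconv import version suffix below are illustrative assumptions.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// recordTimerInvocation tags a server span for a scheduled (timer-triggered)
// function run with the FaaS attributes defined in this file.
func recordTimerInvocation(ctx context.Context, invocationID string) {
	_, span := otel.Tracer("faas-example").Start(ctx, "scheduled-run",
		trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	span.SetAttributes(
		semconv.FaaSTriggerTimer,               // enum value, used as-is
		semconv.FaaSInvocationID(invocationID), // ID supplied by the platform
		semconv.FaaSCron("0/5 * * * ? *"),      // schedule, per the example above
		semconv.FaaSColdstart(false),
	)
}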
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
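A short sketch of how the peer.service and enduser.* helpers compose on a span; the helper name and the attribute values are illustrative assumptions taken from the examples above.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateAuthenticatedCall tags an existing client span with the
// authenticated end user and the logical name of the remote service.
func annotateAuthenticatedCall(span trace.Span, userID, role, scope string) {
	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // logical service.name of the peer
		semconv.EnduserID(userID),             // e.g. username from the access token
		semconv.EnduserRole(role),             // e.g. "admin"
		semconv.EnduserScope(scope),           // e.g. "read:message, write:files"
	)
}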
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow reporting this unit of code and therefore provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). +func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
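To make the intended use concrete, here is a hypothetical client-span annotation for a DynamoDB Query using the request-side helpers in this group; the function name and values are illustrative assumptions drawn from the examples above.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBQuery records request-side DynamoDB attributes on a client
// span wrapping a Query call.
func annotateDynamoDBQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
	)
}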
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
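A similar hypothetical sketch for recording ListTables pagination on a client span; names and values are illustrative assumptions.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBListTables records the pagination attributes of a
// ListTables call.
func annotateDynamoDBListTables(span trace.Span, startTable string, tableCount int) {
	span.SetAttributes(
		semconv.AWSDynamoDBExclusiveStartTable(startTable), // e.g. "Users"
		semconv.AWSDynamoDBTableCount(tableCount),          // e.g. 20
	)
}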
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
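For a parallel Scan, the segment attributes describe this worker's slice of the scan and the count attributes echo the response; a hypothetical sketch, with all names assumed for illustration:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // version suffix assumed
	"go.opentelemetry.io/otel/trace"
)

// annotateDynamoDBScan records Scan request and response attributes on a
// client span.
func annotateDynamoDBScan(span trace.Span, segment, total, count, scanned int) {
	span.SetAttributes(
		semconv.AWSDynamoDBSegment(segment),      // Segment request parameter
		semconv.AWSDynamoDBTotalSegments(total),  // TotalSegments request parameter
		semconv.AWSDynamoDBCount(count),          // Count response parameter
		semconv.AWSDynamoDBScannedCount(scanned), // ScannedCount response parameter
	)
}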
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3PartNumberKey is the attribute Key conforming to the
+	// "aws.s3.part_number" semantic conventions. It represents the part number
+	// of the part being uploaded in a multipart-upload operation. This is a
+	// positive integer between 1 and 10,000.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3456
+	// Note: The `part_number` attribute is only applicable to the
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// and
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	// operations.
+	// The `part_number` attribute corresponds to the `--part-number` parameter
+	// of the
+	// [upload-part operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
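+//
+// For example (an illustrative sketch, not part of the generated
+// conventions; the span variable and bucket/key names are hypothetical),
+// an instrumented CopyObject call could record these attributes as:
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("destination-bucket"),
+//		AWSS3Key("target.yml"),
+//		AWSS3CopySource("source-bucket/source.yml"),
+//	)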
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended (If a client id is available) + // Stability: stable + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. 
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..b618162a --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,160 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + done := ctx.Done() + + s.mu.Lock() + select { + case <-done: + // ctx becoming done has "happened before" acquiring the semaphore, + // whether it became done before the call began or while we were + // waiting for the mutex. We prefer to fail even if we could acquire + // the mutex without blocking. 
+ s.mu.Unlock() + return ctx.Err() + default: + } + if s.size-s.cur >= n && s.waiters.Len() == 0 { + // Since we hold s.mu and haven't synchronized since checking done, if + // ctx becomes done before we return here, it becoming done must have + // "happened concurrently" with this call - it cannot "happen before" + // we return in this branch. So, we're ok to always acquire here. + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-done + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-done: + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. + // Pretend we didn't and put the tokens back. + s.cur -= n + s.notifyWaiters() + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return ctx.Err() + + case <-ready: + // Acquired the semaphore. Check that ctx isn't already done. + // We check the done channel instead of calling ctx.Err because we + // already have the channel, and ctx.Err is O(n) with the nesting + // depth of ctx. + select { + case <-done: + s.Release(n) + return ctx.Err() + default: + } + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
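+			//
+			// A minimal sketch of that read-write-lock pattern (illustrative
+			// only; NReaders and ctx are assumed to exist in the caller):
+			//
+			//	sem := semaphore.NewWeighted(NReaders)
+			//	_ = sem.Acquire(ctx, 1)        // reader: one token per read lock
+			//	_ = sem.Acquire(ctx, NReaders) // writer: all tokens, excludes every reader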
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/howett.net/plist/.gitignore b/vendor/howett.net/plist/.gitignore new file mode 100644 index 00000000..3743b346 --- /dev/null +++ b/vendor/howett.net/plist/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.wasm + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/howett.net/plist/.gitlab-ci.yml b/vendor/howett.net/plist/.gitlab-ci.yml new file mode 100644 index 00000000..11d6dbf7 --- /dev/null +++ b/vendor/howett.net/plist/.gitlab-ci.yml @@ -0,0 +1,39 @@ +image: golang:alpine +stages: + - test + +variables: + GO_PACKAGE: "howett.net/plist" + +before_script: + - "mkdir -p $(dirname $GOPATH/src/$GO_PACKAGE)" + - "ln -s $(pwd) $GOPATH/src/$GO_PACKAGE" + - "cd $GOPATH/src/$GO_PACKAGE" + +.template:go-test: &template-go-test + stage: test + script: + - go test + +go-test-cover:latest: + stage: test + script: + - go test -v -cover + coverage: '/^coverage: \d+\.\d+/' + +go-test-appengine:latest: + stage: test + script: + - go test -tags appengine + +go-test:1.6: + <<: *template-go-test + image: golang:1.6-alpine + +go-test:1.4: + <<: *template-go-test + image: golang:1.4-alpine + +go-test:1.2: + <<: *template-go-test + image: golang:1.2 diff --git a/vendor/howett.net/plist/LICENSE b/vendor/howett.net/plist/LICENSE new file mode 100644 index 00000000..9f6012f3 --- /dev/null +++ b/vendor/howett.net/plist/LICENSE @@ -0,0 +1,58 @@ +Copyright (c) 2013, Dustin L. Howett. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of the FreeBSD Project. + +-------------------------------------------------------------------------------- +Parts of this package were made available under the license covering +the Go language and all attended core libraries. That license follows. +-------------------------------------------------------------------------------- + +Copyright (c) 2012 The Go Authors. 
All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/howett.net/plist/README.md b/vendor/howett.net/plist/README.md new file mode 100644 index 00000000..d751c062 --- /dev/null +++ b/vendor/howett.net/plist/README.md @@ -0,0 +1,21 @@ +# plist - A pure Go property list transcoder [![coverage report](https://gitlab.howett.net/go/plist/badges/main/coverage.svg)](https://gitlab.howett.net/go/plist/commits/main) +## INSTALL +``` +$ go get howett.net/plist +``` + +## FEATURES +* Supports encoding/decoding property lists (Apple XML, Apple Binary, OpenStep and GNUStep) from/to arbitrary Go types + +## USE +```go +package main +import ( + "howett.net/plist" + "os" +) +func main() { + encoder := plist.NewEncoder(os.Stdout) + encoder.Encode(map[string]string{"hello": "world"}) +} +``` diff --git a/vendor/howett.net/plist/bplist.go b/vendor/howett.net/plist/bplist.go new file mode 100644 index 00000000..962793a9 --- /dev/null +++ b/vendor/howett.net/plist/bplist.go @@ -0,0 +1,26 @@ +package plist + +type bplistTrailer struct { + Unused [5]uint8 + SortVersion uint8 + OffsetIntSize uint8 + ObjectRefSize uint8 + NumObjects uint64 + TopObject uint64 + OffsetTableOffset uint64 +} + +const ( + bpTagNull uint8 = 0x00 + bpTagBoolFalse = 0x08 + bpTagBoolTrue = 0x09 + bpTagInteger = 0x10 + bpTagReal = 0x20 + bpTagDate = 0x30 + bpTagData = 0x40 + bpTagASCIIString = 0x50 + bpTagUTF16String = 0x60 + bpTagUID = 0x80 + bpTagArray = 0xA0 + bpTagDictionary = 0xD0 +) diff --git a/vendor/howett.net/plist/bplist_generator.go b/vendor/howett.net/plist/bplist_generator.go new file mode 100644 index 00000000..09ab71b1 --- /dev/null +++ b/vendor/howett.net/plist/bplist_generator.go @@ -0,0 +1,303 @@ +package plist + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "time" + "unicode/utf16" +) + +func bplistMinimumIntSize(n uint64) int { + switch { + case n <= uint64(0xff): + return 1 + case n <= uint64(0xffff): + return 2 + case n <= uint64(0xffffffff): + return 4 + default: + return 8 + } +} + +func bplistValueShouldUnique(pval cfValue) bool { + switch pval.(type) { + case cfString, *cfNumber, *cfReal, cfDate, 
cfData:
+		return true
+	}
+	return false
+}
+
+type bplistGenerator struct {
+	writer   *countedWriter
+	objmap   map[interface{}]uint64 // maps pValue.hash()es to object locations
+	objtable []cfValue
+	trailer  bplistTrailer
+}
+
+func (p *bplistGenerator) flattenPlistValue(pval cfValue) {
+	key := pval.hash()
+	if bplistValueShouldUnique(pval) {
+		if _, ok := p.objmap[key]; ok {
+			return
+		}
+	}
+
+	p.objmap[key] = uint64(len(p.objtable))
+	p.objtable = append(p.objtable, pval)
+
+	switch pval := pval.(type) {
+	case *cfDictionary:
+		pval.sort()
+		for _, k := range pval.keys {
+			p.flattenPlistValue(cfString(k))
+		}
+		for _, v := range pval.values {
+			p.flattenPlistValue(v)
+		}
+	case *cfArray:
+		for _, v := range pval.values {
+			p.flattenPlistValue(v)
+		}
+	}
+}
+
+func (p *bplistGenerator) indexForPlistValue(pval cfValue) (uint64, bool) {
+	v, ok := p.objmap[pval.hash()]
+	return v, ok
+}
+
+func (p *bplistGenerator) generateDocument(root cfValue) {
+	p.objtable = make([]cfValue, 0, 16)
+	p.objmap = make(map[interface{}]uint64)
+	p.flattenPlistValue(root)
+
+	p.trailer.NumObjects = uint64(len(p.objtable))
+	p.trailer.ObjectRefSize = uint8(bplistMinimumIntSize(p.trailer.NumObjects))
+
+	p.writer.Write([]byte("bplist00"))
+
+	offtable := make([]uint64, p.trailer.NumObjects)
+	for i, pval := range p.objtable {
+		offtable[i] = uint64(p.writer.BytesWritten())
+		p.writePlistValue(pval)
+	}
+
+	p.trailer.OffsetIntSize = uint8(bplistMinimumIntSize(uint64(p.writer.BytesWritten())))
+	p.trailer.TopObject = p.objmap[root.hash()]
+	p.trailer.OffsetTableOffset = uint64(p.writer.BytesWritten())
+
+	for _, offset := range offtable {
+		p.writeSizedInt(offset, int(p.trailer.OffsetIntSize))
+	}
+
+	binary.Write(p.writer, binary.BigEndian, p.trailer)
+}
+
+func (p *bplistGenerator) writePlistValue(pval cfValue) {
+	if pval == nil {
+		return
+	}
+
+	switch pval := pval.(type) {
+	case *cfDictionary:
+		p.writeDictionaryTag(pval)
+	case *cfArray:
+		p.writeArrayTag(pval.values)
+	case cfString:
+		p.writeStringTag(string(pval))
+	case *cfNumber:
+		p.writeIntTag(pval.signed, pval.value)
+	case *cfReal:
+		if pval.wide {
+			p.writeRealTag(pval.value, 64)
+		} else {
+			p.writeRealTag(pval.value, 32)
+		}
+	case cfBoolean:
+		p.writeBoolTag(bool(pval))
+	case cfData:
+		p.writeDataTag([]byte(pval))
+	case cfDate:
+		p.writeDateTag(time.Time(pval))
+	case cfUID:
+		p.writeUIDTag(UID(pval))
+	default:
+		panic(fmt.Errorf("unknown plist type %T", pval))
+	}
+}
+
+func (p *bplistGenerator) writeSizedInt(n uint64, nbytes int) {
+	var val interface{}
+	switch nbytes {
+	case 1:
+		val = uint8(n)
+	case 2:
+		val = uint16(n)
+	case 4:
+		val = uint32(n)
+	case 8:
+		val = n
+	default:
+		panic(errors.New("illegal integer size"))
+	}
+	binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeBoolTag(v bool) {
+	tag := uint8(bpTagBoolFalse)
+	if v {
+		tag = bpTagBoolTrue
+	}
+	binary.Write(p.writer, binary.BigEndian, tag)
+}
+
+func (p *bplistGenerator) writeIntTag(signed bool, n uint64) {
+	var tag uint8
+	var val interface{}
+	switch {
+	case n <= uint64(0xff):
+		val = uint8(n)
+		tag = bpTagInteger | 0x0
+	case n <= uint64(0xffff):
+		val = uint16(n)
+		tag = bpTagInteger | 0x1
+	case n <= uint64(0xffffffff):
+		val = uint32(n)
+		tag = bpTagInteger | 0x2
+	case n > uint64(0x7fffffffffffffff) && !signed:
+		// 64-bit values are always *signed* in format 00.
+ // Any unsigned value that doesn't intersect with the signed + // range must be sign-extended and stored as a SInt128 + val = n + tag = bpTagInteger | 0x4 + default: + val = n + tag = bpTagInteger | 0x3 + } + + binary.Write(p.writer, binary.BigEndian, tag) + if tag&0xF == 0x4 { + // SInt128; in the absence of true 128-bit integers in Go, + // we'll just fake the top half. We only got here because + // we had an unsigned 64-bit int that didn't fit, + // so sign extend it with zeroes. + binary.Write(p.writer, binary.BigEndian, uint64(0)) + } + binary.Write(p.writer, binary.BigEndian, val) +} + +func (p *bplistGenerator) writeUIDTag(u UID) { + nbytes := bplistMinimumIntSize(uint64(u)) + tag := uint8(bpTagUID | (nbytes - 1)) + + binary.Write(p.writer, binary.BigEndian, tag) + p.writeSizedInt(uint64(u), nbytes) +} + +func (p *bplistGenerator) writeRealTag(n float64, bits int) { + var tag uint8 = bpTagReal | 0x3 + var val interface{} = n + if bits == 32 { + val = float32(n) + tag = bpTagReal | 0x2 + } + + binary.Write(p.writer, binary.BigEndian, tag) + binary.Write(p.writer, binary.BigEndian, val) +} + +func (p *bplistGenerator) writeDateTag(t time.Time) { + tag := uint8(bpTagDate) | 0x3 + val := float64(t.In(time.UTC).UnixNano()) / float64(time.Second) + val -= 978307200 // Adjust to Apple Epoch + + binary.Write(p.writer, binary.BigEndian, tag) + binary.Write(p.writer, binary.BigEndian, val) +} + +func (p *bplistGenerator) writeCountedTag(tag uint8, count uint64) { + marker := tag + if count >= 0xF { + marker |= 0xF + } else { + marker |= uint8(count) + } + + binary.Write(p.writer, binary.BigEndian, marker) + + if count >= 0xF { + p.writeIntTag(false, count) + } +} + +func (p *bplistGenerator) writeDataTag(data []byte) { + p.writeCountedTag(bpTagData, uint64(len(data))) + binary.Write(p.writer, binary.BigEndian, data) +} + +func (p *bplistGenerator) writeStringTag(str string) { + for _, r := range str { + if r > 0x7F { + utf16Runes := utf16.Encode([]rune(str)) + p.writeCountedTag(bpTagUTF16String, uint64(len(utf16Runes))) + binary.Write(p.writer, binary.BigEndian, utf16Runes) + return + } + } + + p.writeCountedTag(bpTagASCIIString, uint64(len(str))) + binary.Write(p.writer, binary.BigEndian, []byte(str)) +} + +func (p *bplistGenerator) writeDictionaryTag(dict *cfDictionary) { + // assumption: sorted already; flattenPlistValue did this. + cnt := len(dict.keys) + p.writeCountedTag(bpTagDictionary, uint64(cnt)) + vals := make([]uint64, cnt*2) + for i, k := range dict.keys { + // invariant: keys have already been "uniqued" (as PStrings) + keyIdx, ok := p.objmap[cfString(k).hash()] + if !ok { + panic(errors.New("failed to find key " + k + " in object map during serialization")) + } + vals[i] = keyIdx + } + + for i, v := range dict.values { + // invariant: values have already been "uniqued" + objIdx, ok := p.indexForPlistValue(v) + if !ok { + panic(errors.New("failed to find value in object map during serialization")) + } + vals[i+cnt] = objIdx + } + + for _, v := range vals { + p.writeSizedInt(v, int(p.trailer.ObjectRefSize)) + } +} + +func (p *bplistGenerator) writeArrayTag(arr []cfValue) { + p.writeCountedTag(bpTagArray, uint64(len(arr))) + for _, v := range arr { + objIdx, ok := p.indexForPlistValue(v) + if !ok { + panic(errors.New("failed to find value in object map during serialization")) + } + + p.writeSizedInt(objIdx, int(p.trailer.ObjectRefSize)) + } +} + +func (p *bplistGenerator) Indent(i string) { + // There's nothing to indent. 
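+	// A binary plist is a length-prefixed byte format with no textual
+	// layout, so there is no indentation to apply; the method exists only
+	// to satisfy the generator interface shared with the XML and text
+	// generators.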
+} + +func newBplistGenerator(w io.Writer) *bplistGenerator { + return &bplistGenerator{ + writer: &countedWriter{Writer: mustWriter{w}}, + } +} diff --git a/vendor/howett.net/plist/bplist_parser.go b/vendor/howett.net/plist/bplist_parser.go new file mode 100644 index 00000000..1825b570 --- /dev/null +++ b/vendor/howett.net/plist/bplist_parser.go @@ -0,0 +1,353 @@ +package plist + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "runtime" + "time" + "unicode/utf16" +) + +const ( + signedHighBits = 0xFFFFFFFFFFFFFFFF +) + +type offset uint64 + +type bplistParser struct { + buffer []byte + + reader io.ReadSeeker + version int + objects []cfValue // object ID to object + trailer bplistTrailer + trailerOffset uint64 + + containerStack []offset // slice of object offsets; manipulated during container deserialization +} + +func (p *bplistParser) validateDocumentTrailer() { + if p.trailer.OffsetTableOffset >= p.trailerOffset { + panic(fmt.Errorf("offset table beyond beginning of trailer (0x%x, trailer@0x%x)", p.trailer.OffsetTableOffset, p.trailerOffset)) + } + + if p.trailer.OffsetTableOffset < 9 { + panic(fmt.Errorf("offset table begins inside header (0x%x)", p.trailer.OffsetTableOffset)) + } + + if p.trailerOffset > (p.trailer.NumObjects*uint64(p.trailer.OffsetIntSize))+p.trailer.OffsetTableOffset { + panic(errors.New("garbage between offset table and trailer")) + } + + if p.trailer.OffsetTableOffset+(uint64(p.trailer.OffsetIntSize)*p.trailer.NumObjects) > p.trailerOffset { + panic(errors.New("offset table isn't long enough to address every object")) + } + + maxObjectRef := uint64(1) << (8 * p.trailer.ObjectRefSize) + if p.trailer.NumObjects > maxObjectRef { + panic(fmt.Errorf("more objects (%v) than object ref size (%v bytes) can support", p.trailer.NumObjects, p.trailer.ObjectRefSize)) + } + + if p.trailer.OffsetIntSize < uint8(8) && (uint64(1)<<(8*p.trailer.OffsetIntSize)) <= p.trailer.OffsetTableOffset { + panic(errors.New("offset size isn't big enough to address entire file")) + } + + if p.trailer.TopObject >= p.trailer.NumObjects { + panic(fmt.Errorf("top object #%d is out of range (only %d exist)", p.trailer.TopObject, p.trailer.NumObjects)) + } +} + +func (p *bplistParser) parseDocument() (pval cfValue, parseError error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + + parseError = plistParseError{"binary", r.(error)} + } + }() + + p.buffer, _ = ioutil.ReadAll(p.reader) + + l := len(p.buffer) + if l < 40 { + panic(errors.New("not enough data")) + } + + if !bytes.Equal(p.buffer[0:6], []byte{'b', 'p', 'l', 'i', 's', 't'}) { + panic(errors.New("incomprehensible magic")) + } + + p.version = int(((p.buffer[6] - '0') * 10) + (p.buffer[7] - '0')) + + if p.version > 1 { + panic(fmt.Errorf("unexpected version %d", p.version)) + } + + p.trailerOffset = uint64(l - 32) + p.trailer = bplistTrailer{ + SortVersion: p.buffer[p.trailerOffset+5], + OffsetIntSize: p.buffer[p.trailerOffset+6], + ObjectRefSize: p.buffer[p.trailerOffset+7], + NumObjects: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+8:]), + TopObject: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+16:]), + OffsetTableOffset: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+24:]), + } + + p.validateDocumentTrailer() + + // INVARIANTS: + // - Entire offset table is before trailer + // - Offset table begins after header + // - Offset table can address entire document + // - Object IDs are big enough to support the number of objects in this 
plist + // - Top object is in range + + p.objects = make([]cfValue, p.trailer.NumObjects) + + pval = p.objectAtIndex(p.trailer.TopObject) + return +} + +// parseSizedInteger returns a 128-bit integer as low64, high64 +func (p *bplistParser) parseSizedInteger(off offset, nbytes int) (lo uint64, hi uint64, newOffset offset) { + // Per comments in CoreFoundation, format version 00 requires that all + // 1, 2 or 4-byte integers be interpreted as unsigned. 8-byte integers are + // signed (always?) and therefore must be sign extended here. + // negative 1, 2, or 4-byte integers are always emitted as 64-bit. + switch nbytes { + case 1: + lo, hi = uint64(p.buffer[off]), 0 + case 2: + lo, hi = uint64(binary.BigEndian.Uint16(p.buffer[off:])), 0 + case 4: + lo, hi = uint64(binary.BigEndian.Uint32(p.buffer[off:])), 0 + case 8: + lo = binary.BigEndian.Uint64(p.buffer[off:]) + if p.buffer[off]&0x80 != 0 { + // sign extend if lo is signed + hi = signedHighBits + } + case 16: + lo, hi = binary.BigEndian.Uint64(p.buffer[off+8:]), binary.BigEndian.Uint64(p.buffer[off:]) + default: + panic(errors.New("illegal integer size")) + } + newOffset = off + offset(nbytes) + return +} + +func (p *bplistParser) parseObjectRefAtOffset(off offset) (uint64, offset) { + oid, _, next := p.parseSizedInteger(off, int(p.trailer.ObjectRefSize)) + return oid, next +} + +func (p *bplistParser) parseOffsetAtOffset(off offset) (offset, offset) { + parsedOffset, _, next := p.parseSizedInteger(off, int(p.trailer.OffsetIntSize)) + return offset(parsedOffset), next +} + +func (p *bplistParser) objectAtIndex(index uint64) cfValue { + if index >= p.trailer.NumObjects { + panic(fmt.Errorf("invalid object#%d (max %d)", index, p.trailer.NumObjects)) + } + + if pval := p.objects[index]; pval != nil { + return pval + } + + off, _ := p.parseOffsetAtOffset(offset(p.trailer.OffsetTableOffset + (index * uint64(p.trailer.OffsetIntSize)))) + if off > offset(p.trailer.OffsetTableOffset-1) { + panic(fmt.Errorf("object#%d starts beyond beginning of object table (0x%x, table@0x%x)", index, off, p.trailer.OffsetTableOffset)) + } + + pval := p.parseTagAtOffset(off) + p.objects[index] = pval + return pval + +} + +func (p *bplistParser) pushNestedObject(off offset) { + for _, v := range p.containerStack { + if v == off { + p.panicNestedObject(off) + } + } + p.containerStack = append(p.containerStack, off) +} + +func (p *bplistParser) panicNestedObject(off offset) { + ids := "" + for _, v := range p.containerStack { + ids += fmt.Sprintf("0x%x > ", v) + } + + // %s0x%d: ids above ends with " > " + panic(fmt.Errorf("self-referential collection@0x%x (%s0x%x) cannot be deserialized", off, ids, off)) +} + +func (p *bplistParser) popNestedObject() { + p.containerStack = p.containerStack[:len(p.containerStack)-1] +} + +func (p *bplistParser) parseTagAtOffset(off offset) cfValue { + tag := p.buffer[off] + + switch tag & 0xF0 { + case bpTagNull: + switch tag & 0x0F { + case bpTagBoolTrue, bpTagBoolFalse: + return cfBoolean(tag == bpTagBoolTrue) + } + case bpTagInteger: + lo, hi, _ := p.parseIntegerAtOffset(off) + return &cfNumber{ + signed: hi == signedHighBits, // a signed integer is stored as a 128-bit integer with the top 64 bits set + value: lo, + } + case bpTagReal: + nbytes := 1 << (tag & 0x0F) + switch nbytes { + case 4: + bits := binary.BigEndian.Uint32(p.buffer[off+1:]) + return &cfReal{wide: false, value: float64(math.Float32frombits(bits))} + case 8: + bits := binary.BigEndian.Uint64(p.buffer[off+1:]) + return &cfReal{wide: true, value: 
math.Float64frombits(bits)}
+		}
+		panic(errors.New("illegal float size"))
+	case bpTagDate:
+		bits := binary.BigEndian.Uint64(p.buffer[off+1:])
+		val := math.Float64frombits(bits)
+
+		// Apple Epoch is 20010101000000Z
+		// Adjust for UNIX Time
+		val += 978307200
+
+		sec, fsec := math.Modf(val)
+		time := time.Unix(int64(sec), int64(fsec*float64(time.Second))).In(time.UTC)
+		return cfDate(time)
+	case bpTagData:
+		data := p.parseDataAtOffset(off)
+		return cfData(data)
+	case bpTagASCIIString:
+		str := p.parseASCIIStringAtOffset(off)
+		return cfString(str)
+	case bpTagUTF16String:
+		str := p.parseUTF16StringAtOffset(off)
+		return cfString(str)
+	case bpTagUID: // Somehow different than int: low half is nbytes - 1 instead of log2(nbytes)
+		lo, _, _ := p.parseSizedInteger(off+1, int(tag&0xF)+1)
+		return cfUID(lo)
+	case bpTagDictionary:
+		return p.parseDictionaryAtOffset(off)
+	case bpTagArray:
+		return p.parseArrayAtOffset(off)
+	}
+	panic(fmt.Errorf("unexpected atom 0x%2.02x at offset 0x%x", tag, off))
+}
+
+func (p *bplistParser) parseIntegerAtOffset(off offset) (uint64, uint64, offset) {
+	tag := p.buffer[off]
+	return p.parseSizedInteger(off+1, 1<<(tag&0xF))
+}
+
+func (p *bplistParser) countForTagAtOffset(off offset) (uint64, offset) {
+	tag := p.buffer[off]
+	cnt := uint64(tag & 0x0F)
+	if cnt == 0xF {
+		cnt, _, off = p.parseIntegerAtOffset(off + 1)
+		return cnt, off
+	}
+	return cnt, off + 1
+}
+
+func (p *bplistParser) parseDataAtOffset(off offset) []byte {
+	len, start := p.countForTagAtOffset(off)
+	if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+		panic(fmt.Errorf("data@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+	}
+	return p.buffer[start : start+offset(len)]
+}
+
+func (p *bplistParser) parseASCIIStringAtOffset(off offset) string {
+	len, start := p.countForTagAtOffset(off)
+	if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+		panic(fmt.Errorf("ascii string@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+	}
+
+	return zeroCopy8BitString(p.buffer, int(start), int(len))
+}
+
+func (p *bplistParser) parseUTF16StringAtOffset(off offset) string {
+	len, start := p.countForTagAtOffset(off)
+	bytes := len * 2
+	if start+offset(bytes) > offset(p.trailer.OffsetTableOffset) {
+		panic(fmt.Errorf("utf16 string@0x%x too long (%v bytes, max is %v)", off, bytes, p.trailer.OffsetTableOffset-uint64(start)))
+	}
+
+	u16s := make([]uint16, len)
+	for i := offset(0); i < offset(len); i++ {
+		u16s[i] = binary.BigEndian.Uint16(p.buffer[start+(i*2):])
+	}
+	runes := utf16.Decode(u16s)
+	return string(runes)
+}
+
+func (p *bplistParser) parseObjectListAtOffset(off offset, count uint64) []cfValue {
+	if off+offset(count*uint64(p.trailer.ObjectRefSize)) > offset(p.trailer.OffsetTableOffset) {
+		panic(fmt.Errorf("list@0x%x length (%v) puts its end beyond the offset table at 0x%x", off, count, p.trailer.OffsetTableOffset))
+	}
+	objects := make([]cfValue, count)
+
+	next := off
+	var oid uint64
+	for i := uint64(0); i < count; i++ {
+		oid, next = p.parseObjectRefAtOffset(next)
+		objects[i] = p.objectAtIndex(oid)
+	}
+
+	return objects
+}
+
+func (p *bplistParser) parseDictionaryAtOffset(off offset) *cfDictionary {
+	p.pushNestedObject(off)
+	defer p.popNestedObject()
+
+	// a dictionary is an object list of [key key key val val val]
+	cnt, start := p.countForTagAtOffset(off)
+	objects := p.parseObjectListAtOffset(start, cnt*2)
+
+	keys := make([]string, cnt)
+	for i := uint64(0); i < cnt; i++
{ + if str, ok := objects[i].(cfString); ok { + keys[i] = string(str) + } else { + panic(fmt.Errorf("dictionary@0x%x contains non-string key at index %d", off, i)) + } + } + + return &cfDictionary{ + keys: keys, + values: objects[cnt:], + } +} + +func (p *bplistParser) parseArrayAtOffset(off offset) *cfArray { + p.pushNestedObject(off) + defer p.popNestedObject() + + // an array is just an object list + cnt, start := p.countForTagAtOffset(off) + return &cfArray{p.parseObjectListAtOffset(start, cnt)} +} + +func newBplistParser(r io.ReadSeeker) *bplistParser { + return &bplistParser{reader: r} +} diff --git a/vendor/howett.net/plist/decode.go b/vendor/howett.net/plist/decode.go new file mode 100644 index 00000000..4c646677 --- /dev/null +++ b/vendor/howett.net/plist/decode.go @@ -0,0 +1,119 @@ +package plist + +import ( + "bytes" + "io" + "reflect" + "runtime" +) + +type parser interface { + parseDocument() (cfValue, error) +} + +// A Decoder reads a property list from an input stream. +type Decoder struct { + // the format of the most-recently-decoded property list + Format int + + reader io.ReadSeeker + lax bool +} + +// Decode works like Unmarshal, except it reads the decoder stream to find property list elements. +// +// After Decoding, the Decoder's Format field will be set to one of the plist format constants. +func (p *Decoder) Decode(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + header := make([]byte, 6) + p.reader.Read(header) + p.reader.Seek(0, 0) + + var parser parser + var pval cfValue + if bytes.Equal(header, []byte("bplist")) { + parser = newBplistParser(p.reader) + pval, err = parser.parseDocument() + if err != nil { + // Had a bplist header, but still got an error: we have to die here. + return err + } + p.Format = BinaryFormat + } else { + parser = newXMLPlistParser(p.reader) + pval, err = parser.parseDocument() + if _, ok := err.(invalidPlistError); ok { + // Rewind: the XML parser might have exhausted the file. + p.reader.Seek(0, 0) + // We don't use parser here because we want the textPlistParser type + tp := newTextPlistParser(p.reader) + pval, err = tp.parseDocument() + if err != nil { + return err + } + p.Format = tp.format + if p.Format == OpenStepFormat { + // OpenStep property lists can only store strings, + // so we have to turn on lax mode here for the unmarshal step later. + p.lax = true + } + } else { + if err != nil { + return err + } + p.Format = XMLFormat + } + } + + p.unmarshal(pval, reflect.ValueOf(v)) + return +} + +// NewDecoder returns a Decoder that reads property list elements from a stream reader, r. +// NewDecoder requires a Seekable stream for the purposes of file type detection. +func NewDecoder(r io.ReadSeeker) *Decoder { + return &Decoder{Format: InvalidFormat, reader: r, lax: false} +} + +// Unmarshal parses a property list document and stores the result in the value pointed to by v. +// +// Unmarshal uses the inverse of the type encodings that Marshal uses, allocating heap-borne types as necessary. +// +// When given a nil pointer, Unmarshal allocates a new value for it to point to. +// +// To decode property list values into an interface value, Unmarshal decodes the property list into the concrete value contained +// in the interface value. 
If the interface value is nil, Unmarshal stores one of the following in the interface value: +// +// string, bool, uint64, float64 +// plist.UID for "CoreFoundation Keyed Archiver UIDs" (convertible to uint64) +// []byte, for plist data +// []interface{}, for plist arrays +// map[string]interface{}, for plist dictionaries +// +// If a property list value is not appropriate for a given value type, Unmarshal aborts immediately and returns an error. +// +// As Go does not support 128-bit types, and we don't want to pretend we're giving the user integer types (as opposed to +// secretly passing them structs), Unmarshal will drop the high 64 bits of any 128-bit integers encoded in binary property lists. +// (This is important because CoreFoundation serializes some large 64-bit values as 128-bit values with an empty high half.) +// +// When Unmarshal encounters an OpenStep property list, it will enter a relaxed parsing mode: OpenStep property lists can only store +// plain old data as strings, so we will attempt to recover integer, floating-point, boolean and date values wherever they are necessary. +// (for example, if Unmarshal attempts to unmarshal an OpenStep property list into a time.Time, it will try to parse the string it +// receives as a time.) +// +// Unmarshal returns the detected property list format and an error, if any. +func Unmarshal(data []byte, v interface{}) (format int, err error) { + r := bytes.NewReader(data) + dec := NewDecoder(r) + err = dec.Decode(v) + format = dec.Format + return +} diff --git a/vendor/howett.net/plist/doc.go b/vendor/howett.net/plist/doc.go new file mode 100644 index 00000000..457e60b6 --- /dev/null +++ b/vendor/howett.net/plist/doc.go @@ -0,0 +1,5 @@ +// Package plist implements encoding and decoding of Apple's "property list" format. +// Property lists come in three sorts: plain text (GNUStep and OpenStep), XML and binary. +// plist supports all of them. +// The mapping between property list and Go objects is described in the documentation for the Marshal and Unmarshal functions. +package plist diff --git a/vendor/howett.net/plist/encode.go b/vendor/howett.net/plist/encode.go new file mode 100644 index 00000000..f81309b5 --- /dev/null +++ b/vendor/howett.net/plist/encode.go @@ -0,0 +1,126 @@ +package plist + +import ( + "bytes" + "errors" + "io" + "reflect" + "runtime" +) + +type generator interface { + generateDocument(cfValue) + Indent(string) +} + +// An Encoder writes a property list to an output stream. +type Encoder struct { + writer io.Writer + format int + + indent string +} + +// Encode writes the property list encoding of v to the stream. +func (p *Encoder) Encode(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + pval := p.marshal(reflect.ValueOf(v)) + if pval == nil { + panic(errors.New("plist: no root element to encode")) + } + + var g generator + switch p.format { + case XMLFormat: + g = newXMLPlistGenerator(p.writer) + case BinaryFormat, AutomaticFormat: + g = newBplistGenerator(p.writer) + case OpenStepFormat, GNUStepFormat: + g = newTextPlistGenerator(p.writer, p.format) + } + g.Indent(p.indent) + g.generateDocument(pval) + return +} + +// Indent turns on pretty-printing for the XML and Text property list formats. +// Each element begins on a new line and is preceded by one or more copies of indent according to its nesting depth. 
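+//
+// A small illustrative use (hedged sketch; os.Stdout and the sample map are
+// placeholders):
+//
+//	enc := NewEncoder(os.Stdout) // XML by default
+//	enc.Indent("\t")             // one tab per nesting level
+//	err := enc.Encode(map[string]string{"hello": "world"})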
+func (p *Encoder) Indent(indent string) { + p.indent = indent +} + +// NewEncoder returns an Encoder that writes an XML property list to w. +func NewEncoder(w io.Writer) *Encoder { + return NewEncoderForFormat(w, XMLFormat) +} + +// NewEncoderForFormat returns an Encoder that writes a property list to w in the specified format. +// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat). +func NewEncoderForFormat(w io.Writer, format int) *Encoder { + return &Encoder{ + writer: w, + format: format, + } +} + +// NewBinaryEncoder returns an Encoder that writes a binary property list to w. +func NewBinaryEncoder(w io.Writer) *Encoder { + return NewEncoderForFormat(w, BinaryFormat) +} + +// Marshal returns the property list encoding of v in the specified format. +// +// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat). +// +// Marshal traverses the value v recursively. +// Any nil values encountered, other than the root, will be silently discarded as +// the property list format bears no representation for nil values. +// +// Strings, integers of varying size, floats and booleans are encoded unchanged. +// Strings bearing non-ASCII runes will be encoded differently depending upon the property list format: +// UTF-8 for XML property lists and UTF-16 for binary property lists. +// +// Slice and Array values are encoded as property list arrays, except for +// []byte values, which are encoded as data. +// +// Map values encode as dictionaries. The map's key type must be string; there is no provision for encoding non-string dictionary keys. +// +// Struct values are encoded as dictionaries, with only exported fields being serialized. Struct field encoding may be influenced with the use of tags. +// The tag format is: +// +// `plist:"[,flags...]"` +// +// The following flags are supported: +// +// omitempty Only include the field if it is not set to the zero value for its type. +// +// If the key is "-", the field is ignored. +// +// Anonymous struct fields are encoded as if their exported fields were exposed via the outer struct. +// +// Pointer values encode as the value pointed to. +// +// Channel, complex and function values cannot be encoded. Any attempt to do so causes Marshal to return an error. +func Marshal(v interface{}, format int) ([]byte, error) { + return MarshalIndent(v, format, "") +} + +// MarshalIndent works like Marshal, but each property list element +// begins on a new line and is preceded by one or more copies of indent according to its nesting depth. 
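+//
+// For example (illustrative only; the map literal is a placeholder):
+//
+//	data, err := MarshalIndent(map[string]int{"answer": 42}, XMLFormat, "  ")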
+func MarshalIndent(v interface{}, format int, indent string) ([]byte, error) { + buf := &bytes.Buffer{} + enc := NewEncoderForFormat(buf, format) + enc.Indent(indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/howett.net/plist/fuzz.go b/vendor/howett.net/plist/fuzz.go new file mode 100644 index 00000000..18a3b4b9 --- /dev/null +++ b/vendor/howett.net/plist/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package plist + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + buf := bytes.NewReader(data) + + var obj interface{} + if err := NewDecoder(buf).Decode(&obj); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/howett.net/plist/marshal.go b/vendor/howett.net/plist/marshal.go new file mode 100644 index 00000000..e237d20a --- /dev/null +++ b/vendor/howett.net/plist/marshal.go @@ -0,0 +1,187 @@ +package plist + +import ( + "encoding" + "reflect" + "time" +) + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +var ( + plistMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +func implementsInterface(val reflect.Value, interfaceType reflect.Type) (interface{}, bool) { + if val.CanInterface() && val.Type().Implements(interfaceType) { + return val.Interface(), true + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(interfaceType) { + return pv.Interface(), true + } + } + return nil, false +} + +func (p *Encoder) marshalPlistInterface(marshalable Marshaler) cfValue { + value, err := marshalable.MarshalPlist() + if err != nil { + panic(err) + } + return p.marshal(reflect.ValueOf(value)) +} + +// marshalTextInterface marshals a TextMarshaler to a plist string. 
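+// For example, a net.IP value (net.IP implements encoding.TextMarshaler)
+// would be stored as its textual form, e.g. "192.0.2.1".
+// (Illustrative note, not part of the original comments.)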
+func (p *Encoder) marshalTextInterface(marshalable encoding.TextMarshaler) cfValue {
+	s, err := marshalable.MarshalText()
+	if err != nil {
+		panic(err)
+	}
+	return cfString(s)
+}
+
+// marshalStruct marshals a reflected struct value to a plist dictionary
+func (p *Encoder) marshalStruct(typ reflect.Type, val reflect.Value) cfValue {
+	tinfo, _ := getTypeInfo(typ)
+
+	dict := &cfDictionary{
+		keys:   make([]string, 0, len(tinfo.fields)),
+		values: make([]cfValue, 0, len(tinfo.fields)),
+	}
+	for _, finfo := range tinfo.fields {
+		value := finfo.value(val)
+		if !value.IsValid() || finfo.omitEmpty && isEmptyValue(value) {
+			continue
+		}
+		dict.keys = append(dict.keys, finfo.name)
+		dict.values = append(dict.values, p.marshal(value))
+	}
+
+	return dict
+}
+
+func (p *Encoder) marshalTime(val reflect.Value) cfValue {
+	time := val.Interface().(time.Time)
+	return cfDate(time)
+}
+
+func (p *Encoder) marshal(val reflect.Value) cfValue {
+	if !val.IsValid() {
+		return nil
+	}
+
+	if receiver, can := implementsInterface(val, plistMarshalerType); can {
+		return p.marshalPlistInterface(receiver.(Marshaler))
+	}
+
+	// time.Time implements TextMarshaler, but we need to store it in RFC3339
+	if val.Type() == timeType {
+		return p.marshalTime(val)
+	}
+	if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+		ival := val.Elem()
+		if ival.IsValid() && ival.Type() == timeType {
+			return p.marshalTime(ival)
+		}
+	}
+
+	// Check for text marshaler.
+	if receiver, can := implementsInterface(val, textMarshalerType); can {
+		return p.marshalTextInterface(receiver.(encoding.TextMarshaler))
+	}
+
+	// Descend into pointers or interfaces
+	if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+		val = val.Elem()
+	}
+
+	// We got this far and still may have an invalid anything or nil ptr/interface
+	if !val.IsValid() || ((val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface) && val.IsNil()) {
+		return nil
+	}
+
+	typ := val.Type()
+
+	if typ == uidType {
+		return cfUID(val.Uint())
+	}
+
+	if val.Kind() == reflect.Struct {
+		return p.marshalStruct(typ, val)
+	}
+
+	switch val.Kind() {
+	case reflect.String:
+		return cfString(val.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return &cfNumber{signed: true, value: uint64(val.Int())}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return &cfNumber{signed: false, value: val.Uint()}
+	case reflect.Float32:
+		return &cfReal{wide: false, value: val.Float()}
+	case reflect.Float64:
+		return &cfReal{wide: true, value: val.Float()}
+	case reflect.Bool:
+		return cfBoolean(val.Bool())
+	case reflect.Slice, reflect.Array:
+		if typ.Elem().Kind() == reflect.Uint8 {
+			bytes := []byte(nil)
+			if val.CanAddr() && val.Kind() == reflect.Slice {
+				// arrays may be addressable but do not support .Bytes
+				bytes = val.Bytes()
+			} else {
+				bytes = make([]byte, val.Len())
+				reflect.Copy(reflect.ValueOf(bytes), val)
+			}
+			return cfData(bytes)
+		} else {
+			values := make([]cfValue, val.Len())
+			for i, length := 0, val.Len(); i < length; i++ {
+				if subpval := p.marshal(val.Index(i)); subpval != nil {
+					values[i] = subpval
+				}
+			}
+			return &cfArray{values}
+		}
+	case reflect.Map:
+		if typ.Key().Kind() != reflect.String {
+			panic(&unknownTypeError{typ})
+		}
+
+		l := val.Len()
+		dict := &cfDictionary{
+			keys:   make([]string, 0, l),
+			values: make([]cfValue, 0, l),
+		}
+		for _, keyv := range val.MapKeys() {
+			if
subpval := p.marshal(val.MapIndex(keyv)); subpval != nil { + dict.keys = append(dict.keys, keyv.String()) + dict.values = append(dict.values, subpval) + } + } + return dict + default: + panic(&unknownTypeError{typ}) + } +} diff --git a/vendor/howett.net/plist/must.go b/vendor/howett.net/plist/must.go new file mode 100644 index 00000000..2c2523d9 --- /dev/null +++ b/vendor/howett.net/plist/must.go @@ -0,0 +1,50 @@ +package plist + +import ( + "io" + "strconv" +) + +type mustWriter struct { + io.Writer +} + +func (w mustWriter) Write(p []byte) (int, error) { + n, err := w.Writer.Write(p) + if err != nil { + panic(err) + } + return n, nil +} + +func mustParseInt(str string, base, bits int) int64 { + i, err := strconv.ParseInt(str, base, bits) + if err != nil { + panic(err) + } + return i +} + +func mustParseUint(str string, base, bits int) uint64 { + i, err := strconv.ParseUint(str, base, bits) + if err != nil { + panic(err) + } + return i +} + +func mustParseFloat(str string, bits int) float64 { + i, err := strconv.ParseFloat(str, bits) + if err != nil { + panic(err) + } + return i +} + +func mustParseBool(str string) bool { + i, err := strconv.ParseBool(str) + if err != nil { + panic(err) + } + return i +} diff --git a/vendor/howett.net/plist/plist.go b/vendor/howett.net/plist/plist.go new file mode 100644 index 00000000..8883e1c7 --- /dev/null +++ b/vendor/howett.net/plist/plist.go @@ -0,0 +1,83 @@ +package plist + +import ( + "reflect" +) + +// Property list format constants +const ( + // Used by Decoder to represent an invalid property list. + InvalidFormat int = 0 + + // Used to indicate total abandon with regards to Encoder's output format. + AutomaticFormat = 0 + + XMLFormat = 1 + BinaryFormat = 2 + OpenStepFormat = 3 + GNUStepFormat = 4 +) + +var FormatNames = map[int]string{ + InvalidFormat: "unknown/invalid", + XMLFormat: "XML", + BinaryFormat: "Binary", + OpenStepFormat: "OpenStep", + GNUStepFormat: "GNUStep", +} + +type unknownTypeError struct { + typ reflect.Type +} + +func (u *unknownTypeError) Error() string { + return "plist: can't marshal value of type " + u.typ.String() +} + +type invalidPlistError struct { + format string + err error +} + +func (e invalidPlistError) Error() string { + s := "plist: invalid " + e.format + " property list" + if e.err != nil { + s += ": " + e.err.Error() + } + return s +} + +type plistParseError struct { + format string + err error +} + +func (e plistParseError) Error() string { + s := "plist: error parsing " + e.format + " property list" + if e.err != nil { + s += ": " + e.err.Error() + } + return s +} + +// A UID represents a unique object identifier. UIDs are serialized in a manner distinct from +// that of integers. +type UID uint64 + +// Marshaler is the interface implemented by types that can marshal themselves into valid +// property list objects. The returned value is marshaled in place of the original value +// implementing Marshaler +// +// If an error is returned by MarshalPlist, marshaling stops and the error is returned. +type Marshaler interface { + MarshalPlist() (interface{}, error) +} + +// Unmarshaler is the interface implemented by types that can unmarshal themselves from +// property list objects. The UnmarshalPlist method receives a function that may +// be called to unmarshal the original property list value into a field or variable. +// +// It is safe to call the unmarshal function more than once. 
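+//
+// A minimal sketch of an implementation (illustrative; the Version type and
+// its string format are assumed for the example):
+//
+//	type Version struct{ Major, Minor int }
+//
+//	func (v *Version) UnmarshalPlist(unmarshal func(interface{}) error) error {
+//		var s string
+//		if err := unmarshal(&s); err != nil {
+//			return err
+//		}
+//		_, err := fmt.Sscanf(s, "%d.%d", &v.Major, &v.Minor)
+//		return err
+//	}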
+type Unmarshaler interface {
+	UnmarshalPlist(unmarshal func(interface{}) error) error
+}
diff --git a/vendor/howett.net/plist/plist_types.go b/vendor/howett.net/plist/plist_types.go
new file mode 100644
index 00000000..98363644
--- /dev/null
+++ b/vendor/howett.net/plist/plist_types.go
@@ -0,0 +1,172 @@
+package plist
+
+import (
+	"hash/crc32"
+	"sort"
+	"strconv"
+	"time"
+)
+
+// magic value used in the non-binary encoding of UIDs
+// (stored as a dictionary mapping CF$UID->integer)
+const cfUIDMagic = "CF$UID"
+
+type cfValue interface {
+	typeName() string
+	hash() interface{}
+}
+
+type cfDictionary struct {
+	keys   sort.StringSlice
+	values []cfValue
+}
+
+func (*cfDictionary) typeName() string {
+	return "dictionary"
+}
+
+func (p *cfDictionary) hash() interface{} {
+	return p
+}
+
+func (p *cfDictionary) Len() int {
+	return len(p.keys)
+}
+
+func (p *cfDictionary) Less(i, j int) bool {
+	return p.keys.Less(i, j)
+}
+
+func (p *cfDictionary) Swap(i, j int) {
+	p.keys.Swap(i, j)
+	p.values[i], p.values[j] = p.values[j], p.values[i]
+}
+
+func (p *cfDictionary) sort() {
+	sort.Sort(p)
+}
+
+func (p *cfDictionary) maybeUID(lax bool) cfValue {
+	if len(p.keys) == 1 && p.keys[0] == "CF$UID" && len(p.values) == 1 {
+		pval := p.values[0]
+		if integer, ok := pval.(*cfNumber); ok {
+			return cfUID(integer.value)
+		}
+		// Openstep only has cfString. Act like the unmarshaller a bit.
+		if lax {
+			if str, ok := pval.(cfString); ok {
+				if i, err := strconv.ParseUint(string(str), 10, 64); err == nil {
+					return cfUID(i)
+				}
+			}
+		}
+	}
+	return p
+}
+
+type cfArray struct {
+	values []cfValue
+}
+
+func (*cfArray) typeName() string {
+	return "array"
+}
+
+func (p *cfArray) hash() interface{} {
+	return p
+}
+
+type cfString string
+
+func (cfString) typeName() string {
+	return "string"
+}
+
+func (p cfString) hash() interface{} {
+	return string(p)
+}
+
+type cfNumber struct {
+	signed bool
+	value  uint64
+}
+
+func (*cfNumber) typeName() string {
+	return "integer"
+}
+
+func (p *cfNumber) hash() interface{} {
+	if p.signed {
+		return int64(p.value)
+	}
+	return p.value
+}
+
+type cfReal struct {
+	wide  bool
+	value float64
+}
+
+func (cfReal) typeName() string {
+	return "real"
+}
+
+func (p *cfReal) hash() interface{} {
+	if p.wide {
+		return p.value
+	}
+	return float32(p.value)
+}
+
+type cfBoolean bool
+
+func (cfBoolean) typeName() string {
+	return "boolean"
+}
+
+func (p cfBoolean) hash() interface{} {
+	return bool(p)
+}
+
+type cfUID UID
+
+func (cfUID) typeName() string {
+	return "UID"
+}
+
+func (p cfUID) hash() interface{} {
+	return p
+}
+
+func (p cfUID) toDict() *cfDictionary {
+	return &cfDictionary{
+		keys: []string{cfUIDMagic},
+		values: []cfValue{&cfNumber{
+			signed: false,
+			value:  uint64(p),
+		}},
+	}
+}
+
+type cfData []byte
+
+func (cfData) typeName() string {
+	return "data"
+}
+
+func (p cfData) hash() interface{} {
+	// Data are uniqued by their checksums.
+	// TODO: Look at calculating this only once and storing it somewhere;
+	// crc32 is fairly quick, however.
+ return crc32.ChecksumIEEE([]byte(p)) +} + +type cfDate time.Time + +func (cfDate) typeName() string { + return "date" +} + +func (p cfDate) hash() interface{} { + return time.Time(p) +} diff --git a/vendor/howett.net/plist/text_generator.go b/vendor/howett.net/plist/text_generator.go new file mode 100644 index 00000000..d71f02bb --- /dev/null +++ b/vendor/howett.net/plist/text_generator.go @@ -0,0 +1,228 @@ +package plist + +import ( + "encoding/hex" + "io" + "strconv" + "time" +) + +type textPlistGenerator struct { + writer io.Writer + format int + + quotableTable *characterSet + + indent string + depth int + + dictKvDelimiter, dictEntryDelimiter, arrayDelimiter []byte +} + +var ( + textPlistTimeLayout = "2006-01-02 15:04:05 -0700" + padding = "0000" +) + +func (p *textPlistGenerator) generateDocument(pval cfValue) { + p.writePlistValue(pval) +} + +func (p *textPlistGenerator) plistQuotedString(str string) string { + if str == "" { + return `""` + } + s := "" + quot := false + for _, r := range str { + if r > 0xFF { + quot = true + s += `\U` + us := strconv.FormatInt(int64(r), 16) + s += padding[len(us):] + s += us + } else if r > 0x7F { + quot = true + s += `\` + us := strconv.FormatInt(int64(r), 8) + s += padding[1+len(us):] + s += us + } else { + c := uint8(r) + if p.quotableTable.ContainsByte(c) { + quot = true + } + + switch c { + case '\a': + s += `\a` + case '\b': + s += `\b` + case '\v': + s += `\v` + case '\f': + s += `\f` + case '\\': + s += `\\` + case '"': + s += `\"` + case '\t', '\r', '\n': + fallthrough + default: + s += string(c) + } + } + } + if quot { + s = `"` + s + `"` + } + return s +} + +func (p *textPlistGenerator) deltaIndent(depthDelta int) { + if depthDelta < 0 { + p.depth-- + } else if depthDelta > 0 { + p.depth++ + } +} + +func (p *textPlistGenerator) writeIndent() { + if len(p.indent) == 0 { + return + } + if len(p.indent) > 0 { + p.writer.Write([]byte("\n")) + for i := 0; i < p.depth; i++ { + io.WriteString(p.writer, p.indent) + } + } +} + +func (p *textPlistGenerator) writePlistValue(pval cfValue) { + if pval == nil { + return + } + + switch pval := pval.(type) { + case *cfDictionary: + pval.sort() + p.writer.Write([]byte(`{`)) + p.deltaIndent(1) + for i, k := range pval.keys { + p.writeIndent() + io.WriteString(p.writer, p.plistQuotedString(k)) + p.writer.Write(p.dictKvDelimiter) + p.writePlistValue(pval.values[i]) + p.writer.Write(p.dictEntryDelimiter) + } + p.deltaIndent(-1) + p.writeIndent() + p.writer.Write([]byte(`}`)) + case *cfArray: + p.writer.Write([]byte(`(`)) + p.deltaIndent(1) + for _, v := range pval.values { + p.writeIndent() + p.writePlistValue(v) + p.writer.Write(p.arrayDelimiter) + } + p.deltaIndent(-1) + p.writeIndent() + p.writer.Write([]byte(`)`)) + case cfString: + io.WriteString(p.writer, p.plistQuotedString(string(pval))) + case *cfNumber: + if p.format == GNUStepFormat { + p.writer.Write([]byte(`<*I`)) + } + if pval.signed { + io.WriteString(p.writer, strconv.FormatInt(int64(pval.value), 10)) + } else { + io.WriteString(p.writer, strconv.FormatUint(pval.value, 10)) + } + if p.format == GNUStepFormat { + p.writer.Write([]byte(`>`)) + } + case *cfReal: + if p.format == GNUStepFormat { + p.writer.Write([]byte(`<*R`)) + } + // GNUstep does not differentiate between 32/64-bit floats. 
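+		// e.g. both float32(1.5) and float64(1.5) come out as <*R1.5>
+		// (illustrative; the value is always formatted at 64-bit precision).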
+ io.WriteString(p.writer, strconv.FormatFloat(pval.value, 'g', -1, 64)) + if p.format == GNUStepFormat { + p.writer.Write([]byte(`>`)) + } + case cfBoolean: + if p.format == GNUStepFormat { + if pval { + p.writer.Write([]byte(`<*BY>`)) + } else { + p.writer.Write([]byte(`<*BN>`)) + } + } else { + if pval { + p.writer.Write([]byte(`1`)) + } else { + p.writer.Write([]byte(`0`)) + } + } + case cfData: + var hexencoded [9]byte + var l int + var asc = 9 + hexencoded[8] = ' ' + + p.writer.Write([]byte(`<`)) + b := []byte(pval) + for i := 0; i < len(b); i += 4 { + l = i + 4 + if l >= len(b) { + l = len(b) + // We no longer need the space - or the rest of the buffer. + // (we used >= above to get this part without another conditional :P) + asc = (l - i) * 2 + } + // Fill the buffer (only up to 8 characters, to preserve the space we implicitly include + // at the end of every encode) + hex.Encode(hexencoded[:8], b[i:l]) + io.WriteString(p.writer, string(hexencoded[:asc])) + } + p.writer.Write([]byte(`>`)) + case cfDate: + if p.format == GNUStepFormat { + p.writer.Write([]byte(`<*D`)) + io.WriteString(p.writer, time.Time(pval).In(time.UTC).Format(textPlistTimeLayout)) + p.writer.Write([]byte(`>`)) + } else { + io.WriteString(p.writer, p.plistQuotedString(time.Time(pval).In(time.UTC).Format(textPlistTimeLayout))) + } + case cfUID: + p.writePlistValue(pval.toDict()) + } +} + +func (p *textPlistGenerator) Indent(i string) { + p.indent = i + if i == "" { + p.dictKvDelimiter = []byte(`=`) + } else { + // For pretty-printing + p.dictKvDelimiter = []byte(` = `) + } +} + +func newTextPlistGenerator(w io.Writer, format int) *textPlistGenerator { + table := &osQuotable + if format == GNUStepFormat { + table = &gsQuotable + } + return &textPlistGenerator{ + writer: mustWriter{w}, + format: format, + quotableTable: table, + dictKvDelimiter: []byte(`=`), + arrayDelimiter: []byte(`,`), + dictEntryDelimiter: []byte(`;`), + } +} diff --git a/vendor/howett.net/plist/text_parser.go b/vendor/howett.net/plist/text_parser.go new file mode 100644 index 00000000..c60423ff --- /dev/null +++ b/vendor/howett.net/plist/text_parser.go @@ -0,0 +1,580 @@ +// Parser for text plist formats. +// @see https://github.com/apple/swift-corelibs-foundation/blob/master/CoreFoundation/Parsing.subproj/CFOldStylePList.c +// @see https://github.com/gnustep/libs-base/blob/master/Source/NSPropertyList.m +// This parser also handles strings files. 
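+//
+// For orientation, an OpenStep-style document looks roughly like this
+// (illustrative example, not taken from the references above):
+//
+//	{ "name" = "value"; "numbers" = (1, 2); "blob" = <0fbd777f>; }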
+
+package plist
+
+import (
+	"encoding/base64"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"runtime"
+	"strings"
+	"time"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+type textPlistParser struct {
+	reader io.Reader
+	format int
+
+	input string
+	start int
+	pos   int
+	width int
+}
+
+func convertU16(buffer []byte, bo binary.ByteOrder) (string, error) {
+	if len(buffer)%2 != 0 {
+		return "", errors.New("truncated utf16")
+	}
+
+	tmp := make([]uint16, len(buffer)/2)
+	for i := 0; i < len(buffer); i += 2 {
+		tmp[i/2] = bo.Uint16(buffer[i : i+2])
+	}
+	return string(utf16.Decode(tmp)), nil
+}
+
+func guessEncodingAndConvert(buffer []byte) (string, error) {
+	if len(buffer) >= 3 && buffer[0] == 0xEF && buffer[1] == 0xBB && buffer[2] == 0xBF {
+		// UTF-8 BOM
+		return zeroCopy8BitString(buffer, 3, len(buffer)-3), nil
+	} else if len(buffer) >= 2 {
+		// UTF-16 guesses
+
+		switch {
+		// stream is big-endian (BOM is FE FF or head is 00 XX)
+		case (buffer[0] == 0xFE && buffer[1] == 0xFF):
+			return convertU16(buffer[2:], binary.BigEndian)
+		case (buffer[0] == 0 && buffer[1] != 0):
+			return convertU16(buffer, binary.BigEndian)
+
+		// stream is little-endian (BOM is FF FE or head is XX 00)
+		case (buffer[0] == 0xFF && buffer[1] == 0xFE):
+			return convertU16(buffer[2:], binary.LittleEndian)
+		case (buffer[0] != 0 && buffer[1] == 0):
+			return convertU16(buffer, binary.LittleEndian)
+		}
+	}
+
+	// fallback: assume ASCII (not great!)
+	return zeroCopy8BitString(buffer, 0, len(buffer)), nil
+}
+
+func (p *textPlistParser) parseDocument() (pval cfValue, parseError error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			// Wrap all non-invalid-plist errors.
+			parseError = plistParseError{"text", r.(error)}
+		}
+	}()
+
+	buffer, err := ioutil.ReadAll(p.reader)
+	if err != nil {
+		panic(err)
+	}
+
+	p.input, err = guessEncodingAndConvert(buffer)
+	if err != nil {
+		panic(err)
+	}
+
+	val := p.parsePlistValue()
+
+	p.skipWhitespaceAndComments()
+	if p.peek() != eof {
+		if _, ok := val.(cfString); !ok {
+			p.error("garbage after end of document")
+		}
+
+		// Try parsing as .strings.
+		// See -[NSDictionary propertyListFromStringsFileFormat:].
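+		// A .strings file is a flat sequence of key/value pairs with no
+		// surrounding braces, e.g. (illustrative):
+		//
+		//	"KEY" = "Value";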
+ p.start = 0 + p.pos = 0 + val = p.parseDictionary(true) + } + + pval = val + + return +} + +const eof rune = -1 + +func (p *textPlistParser) error(e string, args ...interface{}) { + line := strings.Count(p.input[:p.pos], "\n") + char := p.pos - strings.LastIndex(p.input[:p.pos], "\n") - 1 + panic(fmt.Errorf("%s at line %d character %d", fmt.Sprintf(e, args...), line, char)) +} + +func (p *textPlistParser) next() rune { + if int(p.pos) >= len(p.input) { + p.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(p.input[p.pos:]) + p.width = w + p.pos += p.width + return r +} + +func (p *textPlistParser) backup() { + p.pos -= p.width +} + +func (p *textPlistParser) peek() rune { + r := p.next() + p.backup() + return r +} + +func (p *textPlistParser) emit() string { + s := p.input[p.start:p.pos] + p.start = p.pos + return s +} + +func (p *textPlistParser) ignore() { + p.start = p.pos +} + +func (p *textPlistParser) empty() bool { + return p.start == p.pos +} + +func (p *textPlistParser) scanUntil(ch rune) { + if x := strings.IndexRune(p.input[p.pos:], ch); x >= 0 { + p.pos += x + return + } + p.pos = len(p.input) +} + +func (p *textPlistParser) scanUntilAny(chs string) { + if x := strings.IndexAny(p.input[p.pos:], chs); x >= 0 { + p.pos += x + return + } + p.pos = len(p.input) +} + +func (p *textPlistParser) scanCharactersInSet(ch *characterSet) { + for ch.Contains(p.next()) { + } + p.backup() +} + +func (p *textPlistParser) scanCharactersNotInSet(ch *characterSet) { + var r rune + for { + r = p.next() + if r == eof || ch.Contains(r) { + break + } + } + p.backup() +} + +func (p *textPlistParser) skipWhitespaceAndComments() { + for { + p.scanCharactersInSet(&whitespace) + if strings.HasPrefix(p.input[p.pos:], "//") { + p.scanCharactersNotInSet(&newlineCharacterSet) + } else if strings.HasPrefix(p.input[p.pos:], "/*") { + if x := strings.Index(p.input[p.pos:], "*/"); x >= 0 { + p.pos += x + 2 // skip the */ as well + continue // consume more whitespace + } else { + p.error("unexpected eof in block comment") + } + } else { + break + } + } + p.ignore() +} + +func (p *textPlistParser) parseOctalDigits(max int) uint64 { + var val uint64 + + for i := 0; i < max; i++ { + r := p.next() + + if r >= '0' && r <= '7' { + val <<= 3 + val |= uint64((r - '0')) + } else { + p.backup() + break + } + } + return val +} + +func (p *textPlistParser) parseHexDigits(max int) uint64 { + var val uint64 + + for i := 0; i < max; i++ { + r := p.next() + + if r >= 'a' && r <= 'f' { + val <<= 4 + val |= 10 + uint64((r - 'a')) + } else if r >= 'A' && r <= 'F' { + val <<= 4 + val |= 10 + uint64((r - 'A')) + } else if r >= '0' && r <= '9' { + val <<= 4 + val |= uint64((r - '0')) + } else { + p.backup() + break + } + } + return val +} + +// the \ has already been consumed +func (p *textPlistParser) parseEscape() string { + var s string + switch p.next() { + case 'a': + s = "\a" + case 'b': + s = "\b" + case 'v': + s = "\v" + case 'f': + s = "\f" + case 't': + s = "\t" + case 'r': + s = "\r" + case 'n': + s = "\n" + case '\\': + s = `\` + case '"': + s = `"` + case 'x': // This is our extension. + s = string(rune(p.parseHexDigits(2))) + case 'u', 'U': // 'u' is a GNUstep extension. 
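+		// e.g. `\U00e9` decodes to U+00E9 ('é'); up to four hex digits are consumed.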
+ s = string(rune(p.parseHexDigits(4))) + case '0', '1', '2', '3', '4', '5', '6', '7': + p.backup() // we've already consumed one of the digits + s = string(rune(p.parseOctalDigits(3))) + default: + p.backup() // everything else should be accepted + } + p.ignore() // skip the entire escape sequence + return s +} + +// the " has already been consumed +func (p *textPlistParser) parseQuotedString() cfString { + p.ignore() // ignore the " + + slowPath := false + s := "" + + for { + p.scanUntilAny(`"\`) + switch p.peek() { + case eof: + p.error("unexpected eof in quoted string") + case '"': + section := p.emit() + p.pos++ // skip " + if !slowPath { + return cfString(section) + } else { + s += section + return cfString(s) + } + case '\\': + slowPath = true + s += p.emit() + p.next() // consume \ + s += p.parseEscape() + } + } +} + +func (p *textPlistParser) parseUnquotedString() cfString { + p.scanCharactersNotInSet(&gsQuotable) + s := p.emit() + if s == "" { + p.error("invalid unquoted string (found an unquoted character that should be quoted?)") + } + + return cfString(s) +} + +// the { has already been consumed +func (p *textPlistParser) parseDictionary(ignoreEof bool) cfValue { + //p.ignore() // ignore the { + var keypv cfValue + keys := make([]string, 0, 32) + values := make([]cfValue, 0, 32) +outer: + for { + p.skipWhitespaceAndComments() + + switch p.next() { + case eof: + if !ignoreEof { + p.error("unexpected eof in dictionary") + } + fallthrough + case '}': + break outer + case '"': + keypv = p.parseQuotedString() + default: + p.backup() + keypv = p.parseUnquotedString() + } + + // INVARIANT: key can't be nil; parseQuoted and parseUnquoted + // will panic out before they return nil. + + p.skipWhitespaceAndComments() + + var val cfValue + n := p.next() + if n == ';' { + // This is supposed to be .strings-specific. + // GNUstep parses this as an empty string. + // Apple copies the key like we do. + val = keypv + } else if n == '=' { + // whitespace is consumed within + val = p.parsePlistValue() + + p.skipWhitespaceAndComments() + + if p.next() != ';' { + p.error("missing ; in dictionary") + } + } else { + p.error("missing = in dictionary") + } + + keys = append(keys, string(keypv.(cfString))) + values = append(values, val) + } + + dict := &cfDictionary{keys: keys, values: values} + return dict.maybeUID(p.format == OpenStepFormat) +} + +// the ( has already been consumed +func (p *textPlistParser) parseArray() *cfArray { + //p.ignore() // ignore the ( + values := make([]cfValue, 0, 32) +outer: + for { + p.skipWhitespaceAndComments() + + switch p.next() { + case eof: + p.error("unexpected eof in array") + case ')': + break outer // done here + case ',': + continue // restart; ,) is valid and we don't want to blow it + default: + p.backup() + } + + pval := p.parsePlistValue() // whitespace is consumed within + if str, ok := pval.(cfString); ok && string(str) == "" { + // Empty strings in arrays are apparently skipped? + // TODO: Figure out why this was implemented. 
+ continue + } + values = append(values, pval) + } + return &cfArray{values} +} + +// the <* have already been consumed +func (p *textPlistParser) parseGNUStepValue() cfValue { + typ := p.next() + + if typ == '>' || typ == eof { // <*>, <*EOF + p.error("invalid GNUStep extended value") + } + + if typ != 'I' && typ != 'R' && typ != 'B' && typ != 'D' { + // early out: no need to collect the value if we'll fail to understand it + p.error("unknown GNUStep extended value type `" + string(typ) + "'") + } + + if p.peek() == '"' { // <*x" + p.next() + } + + p.ignore() + p.scanUntil('>') + + if p.peek() == eof { // <*xEOF or <*x"EOF + p.error("unterminated GNUStep extended value") + } + + if p.empty() { // <*x>, <*x""> + p.error("empty GNUStep extended value") + } + + v := p.emit() + p.next() // consume the > + + if v[len(v)-1] == '"' { + // GNUStep tolerates malformed quoted values, as in <*I5"> and <*I"5> + // It purportedly does so by stripping the trailing quote + v = v[:len(v)-1] + } + + switch typ { + case 'I': + if v[0] == '-' { + n := mustParseInt(v, 10, 64) + return &cfNumber{signed: true, value: uint64(n)} + } else { + n := mustParseUint(v, 10, 64) + return &cfNumber{signed: false, value: n} + } + case 'R': + n := mustParseFloat(v, 64) + return &cfReal{wide: true, value: n} // TODO(DH) 32/64 + case 'B': + b := v[0] == 'Y' + return cfBoolean(b) + case 'D': + t, err := time.Parse(textPlistTimeLayout, v) + if err != nil { + p.error(err.Error()) + } + + return cfDate(t.In(time.UTC)) + } + // We should never get here; we checked the type above + return nil +} + +// the <[ have already been consumed +func (p *textPlistParser) parseGNUStepBase64() cfData { + p.ignore() + p.scanUntil(']') + v := p.emit() + + if p.next() != ']' { + p.error("invalid GNUStep base64 data (expected ']')") + } + + if p.next() != '>' { + p.error("invalid GNUStep base64 data (expected '>')") + } + + // Emulate NSDataBase64DecodingIgnoreUnknownCharacters + filtered := strings.Map(base64ValidChars.Map, v) + data, err := base64.StdEncoding.DecodeString(filtered) + if err != nil { + p.error("invalid GNUStep base64 data: " + err.Error()) + } + return cfData(data) +} + +// The < has already been consumed +func (p *textPlistParser) parseHexData() cfData { + buf := make([]byte, 256) + i := 0 + c := 0 + + for { + r := p.next() + switch r { + case eof: + p.error("unexpected eof in data") + case '>': + if c&1 == 1 { + p.error("uneven number of hex digits in data") + } + p.ignore() + return cfData(buf[:i]) + // Apple and GNUstep both want these in pairs. We are a bit more lax. + // GS accepts comments too, but that seems like a lot of work. 
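+		// e.g. <0fbd 777f> and <0fbd777f> decode to the same four bytes (illustrative).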
+		case ' ', '\t', '\n', '\r', '\u2028', '\u2029':
+			continue
+		}
+
+		buf[i] <<= 4
+		if r >= 'a' && r <= 'f' {
+			buf[i] |= 10 + byte((r - 'a'))
+		} else if r >= 'A' && r <= 'F' {
+			buf[i] |= 10 + byte((r - 'A'))
+		} else if r >= '0' && r <= '9' {
+			buf[i] |= byte((r - '0'))
+		} else {
+			p.error("unexpected hex digit `%c'", r)
+		}
+
+		c++
+		if c&1 == 0 {
+			i++
+			if i >= len(buf) {
+				realloc := make([]byte, len(buf)*2)
+				copy(realloc, buf)
+				buf = realloc
+			}
+		}
+	}
+}
+
+func (p *textPlistParser) parsePlistValue() cfValue {
+	for {
+		p.skipWhitespaceAndComments()
+
+		switch p.next() {
+		case eof:
+			return &cfDictionary{}
+		case '<':
+			switch p.next() {
+			case '*':
+				p.format = GNUStepFormat
+				return p.parseGNUStepValue()
+			case '[':
+				p.format = GNUStepFormat
+				return p.parseGNUStepBase64()
+			default:
+				p.backup()
+				return p.parseHexData()
+			}
+		case '"':
+			return p.parseQuotedString()
+		case '{':
+			return p.parseDictionary(false)
+		case '(':
+			return p.parseArray()
+		default:
+			p.backup()
+			return p.parseUnquotedString()
+		}
+	}
+}
+
+func newTextPlistParser(r io.Reader) *textPlistParser {
+	return &textPlistParser{
+		reader: r,
+		format: OpenStepFormat,
+	}
+}
diff --git a/vendor/howett.net/plist/text_tables.go b/vendor/howett.net/plist/text_tables.go
new file mode 100644
index 00000000..2bdd7ba9
--- /dev/null
+++ b/vendor/howett.net/plist/text_tables.go
@@ -0,0 +1,61 @@
+package plist
+
+type characterSet [4]uint64
+
+func (s *characterSet) Map(ch rune) rune {
+	if s.Contains(ch) {
+		return ch
+	} else {
+		return -1
+	}
+}
+
+func (s *characterSet) Contains(ch rune) bool {
+	return ch >= 0 && ch <= 255 && s.ContainsByte(byte(ch))
+}
+
+func (s *characterSet) ContainsByte(ch byte) bool {
+	return (s[ch/64]&(1<<(ch%64)) > 0)
+}
+
+// Bitmap of characters that must be inside a quoted string
+// when written to an old-style property list
+// Low bits represent lower characters, and each uint64 represents 64 characters.
+var gsQuotable = characterSet{
+	0x78001385ffffffff,
+	0xa800000138000000,
+	0xffffffffffffffff,
+	0xffffffffffffffff,
+}
+
+// 7f instead of 3f in the top line: CFOldStylePlist.c says . is valid, but they quote it.
+// ef instead of 6f in the top line: ' will be quoted
+var osQuotable = characterSet{
+	0xf4007fefffffffff,
+	0xf8000001f8000001,
+	0xffffffffffffffff,
+	0xffffffffffffffff,
+}
+
+var whitespace = characterSet{
+	0x0000000100003f00,
+	0x0000000000000000,
+	0x0000000000000000,
+	0x0000000000000000,
+}
+
+var newlineCharacterSet = characterSet{
+	0x0000000000002400,
+	0x0000000000000000,
+	0x0000000000000000,
+	0x0000000000000000,
+}
+
+// Bitmap of characters that are valid in base64-encoded strings.
+// Used to filter out non-b64 characters to emulate NSDataBase64DecodingIgnoreUnknownCharacters
+var base64ValidChars = characterSet{
+	0x23ff880000000000,
+	0x07fffffe07fffffe,
+	0x0000000000000000,
+	0x0000000000000000,
+}
diff --git a/vendor/howett.net/plist/typeinfo.go b/vendor/howett.net/plist/typeinfo.go
new file mode 100644
index 00000000..f0b920f8
--- /dev/null
+++ b/vendor/howett.net/plist/typeinfo.go
@@ -0,0 +1,170 @@
+package plist
+
+import (
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// typeInfo holds details for the plist representation of a type.
+type typeInfo struct {
+	fields []fieldInfo
+}
+
+// fieldInfo holds details for the plist representation of a single field.
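+// For example, a field declared as (illustrative):
+//
+//	Name string `plist:"name,omitempty"`
+//
+// yields a fieldInfo with name "name" and omitEmpty set.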
+type fieldInfo struct { + idx []int + name string + omitEmpty bool +} + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("plist") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("plist") + + // Parse flags. + tokens := strings.Split(tag, ",") + tag = tokens[0] + if len(tokens) > 1 { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "omitempty": + finfo.omitEmpty = true + } + } + } + + if tag == "" { + // If the name part of the tag is completely empty, + // use the field name + finfo.name = f.Name + return finfo, nil + } + + finfo.name = tag + return finfo, nil +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. 
+ for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/vendor/howett.net/plist/unmarshal.go b/vendor/howett.net/plist/unmarshal.go new file mode 100644 index 00000000..63b4b1d5 --- /dev/null +++ b/vendor/howett.net/plist/unmarshal.go @@ -0,0 +1,331 @@ +package plist + +import ( + "encoding" + "fmt" + "reflect" + "runtime" + "time" +) + +type incompatibleDecodeTypeError struct { + dest reflect.Type + src string // type name (from cfValue) +} + +func (u *incompatibleDecodeTypeError) Error() string { + return fmt.Sprintf("plist: type mismatch: tried to decode plist type `%v' into value of type `%v'", u.src, u.dest) +} + +var ( + plistUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + uidType = reflect.TypeOf(UID(0)) +) + +func isEmptyInterface(v reflect.Value) bool { + return v.Kind() == reflect.Interface && v.NumMethod() == 0 +} + +func (p *Decoder) unmarshalPlistInterface(pval cfValue, unmarshalable Unmarshaler) { + err := unmarshalable.UnmarshalPlist(func(i interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + p.unmarshal(pval, reflect.ValueOf(i)) + return + }) + + if err != nil { + panic(err) + } +} + +func (p *Decoder) unmarshalTextInterface(pval cfString, unmarshalable encoding.TextUnmarshaler) { + err := unmarshalable.UnmarshalText([]byte(pval)) + if err != nil { + panic(err) + } +} + +func (p *Decoder) unmarshalTime(pval cfDate, val reflect.Value) { + val.Set(reflect.ValueOf(time.Time(pval))) +} + +func (p *Decoder) unmarshalLaxString(s string, val reflect.Value) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := mustParseInt(s, 10, 64) + val.SetInt(i) + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + i := mustParseUint(s, 10, 64) + val.SetUint(i) + return + case reflect.Float32, reflect.Float64: + f := mustParseFloat(s, 64) + val.SetFloat(f) + return + case reflect.Bool: + b := mustParseBool(s) + val.SetBool(b) + return + case reflect.Struct: + if val.Type() == timeType { + t, err := time.Parse(textPlistTimeLayout, s) + if err != nil { + panic(err) + } + val.Set(reflect.ValueOf(t.In(time.UTC))) + return + } + fallthrough + default: + panic(&incompatibleDecodeTypeError{val.Type(), "string"}) + } +} + +func (p *Decoder) unmarshal(pval cfValue, val reflect.Value) { + if pval == nil { + return + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if isEmptyInterface(val) { + v := p.valueInterface(pval) + val.Set(reflect.ValueOf(v)) + return + } + + incompatibleTypeError := &incompatibleDecodeTypeError{val.Type(), pval.typeName()} + + // time.Time 
implements TextMarshaler, but we need to parse it as RFC3339 + if date, ok := pval.(cfDate); ok { + if val.Type() == timeType { + p.unmarshalTime(date, val) + return + } + panic(incompatibleTypeError) + } + + if receiver, can := implementsInterface(val, plistUnmarshalerType); can { + p.unmarshalPlistInterface(pval, receiver.(Unmarshaler)) + return + } + + if val.Type() != timeType { + if receiver, can := implementsInterface(val, textUnmarshalerType); can { + if str, ok := pval.(cfString); ok { + p.unmarshalTextInterface(str, receiver.(encoding.TextUnmarshaler)) + } else { + panic(incompatibleTypeError) + } + return + } + } + + typ := val.Type() + + switch pval := pval.(type) { + case cfString: + if val.Kind() == reflect.String { + val.SetString(string(pval)) + return + } + if p.lax { + p.unmarshalLaxString(string(pval), val) + return + } + + panic(incompatibleTypeError) + case *cfNumber: + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val.SetInt(int64(pval.value)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val.SetUint(pval.value) + default: + panic(incompatibleTypeError) + } + case *cfReal: + if val.Kind() == reflect.Float32 || val.Kind() == reflect.Float64 { + // TODO: Consider warning on a downcast (storing a 64-bit value in a 32-bit reflect) + val.SetFloat(pval.value) + } else { + panic(incompatibleTypeError) + } + case cfBoolean: + if val.Kind() == reflect.Bool { + val.SetBool(bool(pval)) + } else { + panic(incompatibleTypeError) + } + case cfData: + if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { + panic(incompatibleTypeError) + } + + if typ.Elem().Kind() != reflect.Uint8 { + panic(incompatibleTypeError) + } + + b := []byte(pval) + switch val.Kind() { + case reflect.Slice: + val.SetBytes(b) + case reflect.Array: + if val.Len() < len(b) { + panic(fmt.Errorf("plist: attempted to unmarshal %d bytes into a byte array of size %d", len(b), val.Len())) + } + sval := reflect.ValueOf(b) + reflect.Copy(val, sval) + } + case cfUID: + if val.Type() == uidType { + val.SetUint(uint64(pval)) + } else { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val.SetInt(int64(pval)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + val.SetUint(uint64(pval)) + default: + panic(incompatibleTypeError) + } + } + case *cfArray: + p.unmarshalArray(pval, val) + case *cfDictionary: + p.unmarshalDictionary(pval, val) + } +} + +func (p *Decoder) unmarshalArray(a *cfArray, val reflect.Value) { + var n int + if val.Kind() == reflect.Slice { + // Slice of element values. + // Grow slice. + cnt := len(a.values) + val.Len() + if cnt >= val.Cap() { + ncap := 2 * cnt + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(val.Type(), val.Len(), ncap) + reflect.Copy(new, val) + val.Set(new) + } + n = val.Len() + val.SetLen(cnt) + } else if val.Kind() == reflect.Array { + if len(a.values) > val.Cap() { + panic(fmt.Errorf("plist: attempted to unmarshal %d values into an array of size %d", len(a.values), val.Cap())) + } + } else { + panic(&incompatibleDecodeTypeError{val.Type(), a.typeName()}) + } + + // Recur to read element into slice. 
+	for _, sval := range a.values {
+		p.unmarshal(sval, val.Index(n))
+		n++
+	}
+	return
+}
+
+func (p *Decoder) unmarshalDictionary(dict *cfDictionary, val reflect.Value) {
+	typ := val.Type()
+	switch val.Kind() {
+	case reflect.Struct:
+		tinfo, err := getTypeInfo(typ)
+		if err != nil {
+			panic(err)
+		}
+
+		entries := make(map[string]cfValue, len(dict.keys))
+		for i, k := range dict.keys {
+			sval := dict.values[i]
+			entries[k] = sval
+		}
+
+		for _, finfo := range tinfo.fields {
+			p.unmarshal(entries[finfo.name], finfo.value(val))
+		}
+	case reflect.Map:
+		if val.IsNil() {
+			val.Set(reflect.MakeMap(typ))
+		}
+
+		for i, k := range dict.keys {
+			sval := dict.values[i]
+
+			keyv := reflect.ValueOf(k).Convert(typ.Key())
+			mapElem := reflect.New(typ.Elem()).Elem()
+
+			p.unmarshal(sval, mapElem)
+			val.SetMapIndex(keyv, mapElem)
+		}
+	default:
+		panic(&incompatibleDecodeTypeError{typ, dict.typeName()})
+	}
+}
+
+/* *Interface is modelled after encoding/json */
+func (p *Decoder) valueInterface(pval cfValue) interface{} {
+	switch pval := pval.(type) {
+	case cfString:
+		return string(pval)
+	case *cfNumber:
+		if pval.signed {
+			return int64(pval.value)
+		}
+		return pval.value
+	case *cfReal:
+		if pval.wide {
+			return pval.value
+		} else {
+			return float32(pval.value)
+		}
+	case cfBoolean:
+		return bool(pval)
+	case *cfArray:
+		return p.arrayInterface(pval)
+	case *cfDictionary:
+		return p.dictionaryInterface(pval)
+	case cfData:
+		return []byte(pval)
+	case cfDate:
+		return time.Time(pval)
+	case cfUID:
+		return UID(pval)
+	}
+	return nil
+}
+
+func (p *Decoder) arrayInterface(a *cfArray) []interface{} {
+	out := make([]interface{}, len(a.values))
+	for i, subv := range a.values {
+		out[i] = p.valueInterface(subv)
+	}
+	return out
+}
+
+func (p *Decoder) dictionaryInterface(dict *cfDictionary) map[string]interface{} {
+	out := make(map[string]interface{})
+	for i, k := range dict.keys {
+		subv := dict.values[i]
+		out[k] = p.valueInterface(subv)
+	}
+	return out
+}
diff --git a/vendor/howett.net/plist/util.go b/vendor/howett.net/plist/util.go
new file mode 100644
index 00000000..d4e437a4
--- /dev/null
+++ b/vendor/howett.net/plist/util.go
@@ -0,0 +1,25 @@
+package plist
+
+import "io"
+
+type countedWriter struct {
+	io.Writer
+	nbytes int
+}
+
+func (w *countedWriter) Write(p []byte) (int, error) {
+	n, err := w.Writer.Write(p)
+	w.nbytes += n
+	return n, err
+}
+
+func (w *countedWriter) BytesWritten() int {
+	return w.nbytes
+}
+
+func unsignedGetBase(s string) (string, int) {
+	if len(s) > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
+		return s[2:], 16
+	}
+	return s, 10
+}
diff --git a/vendor/howett.net/plist/xml_generator.go b/vendor/howett.net/plist/xml_generator.go
new file mode 100644
index 00000000..30597c16
--- /dev/null
+++ b/vendor/howett.net/plist/xml_generator.go
@@ -0,0 +1,178 @@
+package plist
+
+import (
+	"bufio"
+	"encoding/base64"
+	"encoding/xml"
+	"io"
+	"math"
+	"strconv"
+	"time"
+)
+
+const (
+	xmlHEADER     string = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+	xmlDOCTYPE           = `<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">` + "\n"
+	xmlArrayTag          = "array"
+	xmlDataTag           = "data"
+	xmlDateTag           = "date"
+	xmlDictTag           = "dict"
+	xmlFalseTag          = "false"
+	xmlIntegerTag        = "integer"
+	xmlKeyTag            = "key"
+	xmlPlistTag          = "plist"
+	xmlRealTag           = "real"
+	xmlStringTag         = "string"
+	xmlTrueTag           = "true"
+)
+
+func formatXMLFloat(f float64) string {
+	switch {
+	case math.IsInf(f, 1):
+		return "inf"
+	case math.IsInf(f, -1):
+		return "-inf"
+	case math.IsNaN(f):
+		return "nan"
+	}
+	return strconv.FormatFloat(f, 'g', -1, 64)
+}
+
+type xmlPlistGenerator struct {
+	*bufio.Writer
+
+	indent     string
+	depth      int
+	putNewline bool
+}
+
+func (p *xmlPlistGenerator) generateDocument(root cfValue) {
+	p.WriteString(xmlHEADER)
+	p.WriteString(xmlDOCTYPE)
+
+	p.openTag(`plist version="1.0"`)
+	p.writePlistValue(root)
+	p.closeTag(xmlPlistTag)
+	p.Flush()
+}
+
+func (p *xmlPlistGenerator) openTag(n string) {
+	p.writeIndent(1)
+	p.WriteByte('<')
+	p.WriteString(n)
+	p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) closeTag(n string) {
+	p.writeIndent(-1)
+	p.WriteString("</")
+	p.WriteString(n)
+	p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) element(n string, v string) {
+	p.writeIndent(0)
+	if len(v) == 0 {
+		p.WriteByte('<')
+		p.WriteString(n)
+		p.WriteString("/>")
+	} else {
+		p.WriteByte('<')
+		p.WriteString(n)
+		p.WriteByte('>')
+
+		err := xml.EscapeText(p.Writer, []byte(v))
+		if err != nil {
+			panic(err)
+		}
+
+		p.WriteString("</")
+		p.WriteString(n)
+		p.WriteByte('>')
+	}
+}
+
+func (p *xmlPlistGenerator) writeDictionary(dict *cfDictionary) {
+	dict.sort()
+	p.openTag(xmlDictTag)
+	for i, k := range dict.keys {
+		p.element(xmlKeyTag, k)
+		p.writePlistValue(dict.values[i])
+	}
+	p.closeTag(xmlDictTag)
+}
+
+func (p *xmlPlistGenerator) writeArray(a *cfArray) {
+	p.openTag(xmlArrayTag)
+	for _, v := range a.values {
+		p.writePlistValue(v)
+	}
+	p.closeTag(xmlArrayTag)
+}
+
+func (p *xmlPlistGenerator) writePlistValue(pval cfValue) {
+	if pval == nil {
+		return
+	}
+
+	switch pval := pval.(type) {
+	case cfString:
+		p.element(xmlStringTag, string(pval))
+	case *cfNumber:
+		if pval.signed {
+			p.element(xmlIntegerTag, strconv.FormatInt(int64(pval.value), 10))
+		} else {
+			p.element(xmlIntegerTag, strconv.FormatUint(pval.value, 10))
+		}
+	case *cfReal:
+		p.element(xmlRealTag, formatXMLFloat(pval.value))
+	case cfBoolean:
+		if bool(pval) {
+			p.element(xmlTrueTag, "")
+		} else {
+			p.element(xmlFalseTag, "")
+		}
+	case cfData:
+		p.element(xmlDataTag, base64.StdEncoding.EncodeToString([]byte(pval)))
+	case cfDate:
+		p.element(xmlDateTag, time.Time(pval).In(time.UTC).Format(time.RFC3339))
+	case *cfDictionary:
+		p.writeDictionary(pval)
+	case *cfArray:
+		p.writeArray(pval)
+	case cfUID:
+		p.writePlistValue(pval.toDict())
+	}
+}
+
+func (p *xmlPlistGenerator) writeIndent(delta int) {
+	if len(p.indent) == 0 {
+		return
+	}
+
+	if delta < 0 {
+		p.depth--
+	}
+
+	if p.putNewline {
+		// from encoding/xml/marshal.go; it seems to be intended
+		// to suppress the first newline.
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	for i := 0; i < p.depth; i++ {
+		p.WriteString(p.indent)
+	}
+	if delta > 0 {
+		p.depth++
+	}
+}
+
+func (p *xmlPlistGenerator) Indent(i string) {
+	p.indent = i
+}
+
+func newXMLPlistGenerator(w io.Writer) *xmlPlistGenerator {
+	return &xmlPlistGenerator{Writer: bufio.NewWriter(w)}
+}
diff --git a/vendor/howett.net/plist/xml_parser.go b/vendor/howett.net/plist/xml_parser.go
new file mode 100644
index 00000000..7415ef3e
--- /dev/null
+++ b/vendor/howett.net/plist/xml_parser.go
@@ -0,0 +1,211 @@
+package plist
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type xmlPlistParser struct {
+	reader             io.Reader
+	xmlDecoder         *xml.Decoder
+	whitespaceReplacer *strings.Replacer
+	ntags              int
+}
+
+func (p *xmlPlistParser) parseDocument() (pval cfValue, parseError error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			if _, ok := r.(invalidPlistError); ok {
+				parseError = r.(error)
+			} else {
+				// Wrap all non-invalid-plist errors.
+				parseError = plistParseError{"XML", r.(error)}
+			}
+		}
+	}()
+	for {
+		if token, err := p.xmlDecoder.Token(); err == nil {
+			if element, ok := token.(xml.StartElement); ok {
+				pval = p.parseXMLElement(element)
+				if p.ntags == 0 {
+					panic(invalidPlistError{"XML", errors.New("no elements encountered")})
+				}
+				return
+			}
+		} else {
+			// The first XML parse turned out to be invalid:
+			// we do not have an XML property list.
+			panic(invalidPlistError{"XML", err})
+		}
+	}
+}
+
+func (p *xmlPlistParser) parseXMLElement(element xml.StartElement) cfValue {
+	var charData xml.CharData
+	switch element.Name.Local {
+	case "plist":
+		p.ntags++
+		for {
+			token, err := p.xmlDecoder.Token()
+			if err != nil {
+				panic(err)
+			}
+
+			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "plist" {
+				break
+			}
+
+			if el, ok := token.(xml.StartElement); ok {
+				return p.parseXMLElement(el)
+			}
+		}
+		return nil
+	case "string":
+		p.ntags++
+		err := p.xmlDecoder.DecodeElement(&charData, &element)
+		if err != nil {
+			panic(err)
+		}
+
+		return cfString(charData)
+	case "integer":
+		p.ntags++
+		err := p.xmlDecoder.DecodeElement(&charData, &element)
+		if err != nil {
+			panic(err)
+		}
+
+		s := string(charData)
+		if len(s) == 0 {
+			panic(errors.New("invalid empty <integer/>"))
+		}
+
+		if s[0] == '-' {
+			s, base := unsignedGetBase(s[1:])
+			n := mustParseInt("-"+s, base, 64)
+			return &cfNumber{signed: true, value: uint64(n)}
+		} else {
+			s, base := unsignedGetBase(s)
+			n := mustParseUint(s, base, 64)
+			return &cfNumber{signed: false, value: n}
+		}
+	case "real":
+		p.ntags++
+		err := p.xmlDecoder.DecodeElement(&charData, &element)
+		if err != nil {
+			panic(err)
+		}
+
+		n := mustParseFloat(string(charData), 64)
+		return &cfReal{wide: true, value: n}
+	case "true", "false":
+		p.ntags++
+		p.xmlDecoder.Skip()
+
+		b := element.Name.Local == "true"
+		return cfBoolean(b)
+	case "date":
+		p.ntags++
+		err := p.xmlDecoder.DecodeElement(&charData, &element)
+		if err != nil {
+			panic(err)
+		}
+
+		t, err := time.ParseInLocation(time.RFC3339, string(charData), time.UTC)
+		if err != nil {
+			panic(err)
+		}
+
+		return cfDate(t)
+	case "data":
+		p.ntags++
+		err := p.xmlDecoder.DecodeElement(&charData, &element)
+		if err != nil {
+			panic(err)
+		}
+
+		str := p.whitespaceReplacer.Replace(string(charData))
+
+		l := base64.StdEncoding.DecodedLen(len(str))
+		bytes := make([]uint8, l)
+		l, err = base64.StdEncoding.Decode(bytes, []byte(str))
+		if err != nil {
+			panic(err)
+		}
+
+		return cfData(bytes[:l])
+	case "dict":
+		p.ntags++
+		var key *string
+		keys := make([]string, 0, 32)
+		values := make([]cfValue, 0, 32)
+		for {
+			token, err := p.xmlDecoder.Token()
+			if err != nil {
+				panic(err)
+			}
+
+			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "dict" {
+				if key != nil {
+					panic(errors.New("missing value in dictionary"))
+				}
+				break
+			}
+
+			if el, ok := token.(xml.StartElement); ok {
+				if el.Name.Local == "key" {
+					var k string
+					p.xmlDecoder.DecodeElement(&k, &el)
+					key = &k
+				} else {
+					if key == nil {
+						panic(errors.New("missing key in dictionary"))
+					}
+					keys = append(keys, *key)
+					values = append(values, p.parseXMLElement(el))
+					key = nil
+				}
+			}
+		}
+
+		dict := &cfDictionary{keys: keys, values: values}
+		return dict.maybeUID(false)
+	case "array":
+		p.ntags++
+		values := make([]cfValue, 0, 10)
+		for {
+			token, err := p.xmlDecoder.Token()
+			if err != nil {
+				panic(err)
+			}
+
+			if el, ok := token.(xml.EndElement); ok && el.Name.Local == "array" {
+				break
+			}
+
+			if el, ok := token.(xml.StartElement); ok {
+				values = append(values, p.parseXMLElement(el))
+			}
+		}
+		return &cfArray{values}
+	}
+	err := fmt.Errorf("encountered unknown element %s", element.Name.Local)
+	if p.ntags == 0 {
+		// If our first XML tag is invalid, it might be an OpenStep data element, a la <abab> or <0101>
+		panic(invalidPlistError{"XML", err})
+	}
+	panic(err)
+}
+
+func newXMLPlistParser(r io.Reader) *xmlPlistParser {
+	return &xmlPlistParser{r, xml.NewDecoder(r), strings.NewReplacer("\t", "", "\n", "", " ", "", "\r", ""), 0}
+}
diff --git a/vendor/howett.net/plist/zerocopy.go b/vendor/howett.net/plist/zerocopy.go
new file mode 100644
index 00000000..999f401b
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package plist
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+	if len == 0 {
+		return ""
+	}
+
+	var s string
+	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	hdr.Data = uintptr(unsafe.Pointer(&buf[off]))
+	hdr.Len = len
+	return s
+}
diff --git a/vendor/howett.net/plist/zerocopy_appengine.go b/vendor/howett.net/plist/zerocopy_appengine.go
new file mode 100644
index 00000000..dbd9a1ac
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy_appengine.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package plist
+
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+	return string(buf[off : off+len])
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 223a7399..20c55185 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,3 +1,6 @@
+# github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6
+## explicit; go 1.20
+github.com/AdaLogics/go-fuzz-headers
 # github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
@@ -13,6 +16,9 @@ github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 # github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d
 ## explicit
+# github.com/StackExchange/wmi v1.2.1
+## explicit; go 1.13
+github.com/StackExchange/wmi
 # github.com/beorn7/perks v1.0.1
 ## explicit; go 1.11
 github.com/beorn7/perks/quantile
@@ -24,9 +30,36 @@ github.com/cenkalti/backoff/v4
 # github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
+# github.com/containerd/containerd/v2 v2.0.4
+## explicit; go 1.22.0
+github.com/containerd/containerd/v2/core/content
+github.com/containerd/containerd/v2/core/images
+github.com/containerd/containerd/v2/core/images/archive
+github.com/containerd/containerd/v2/core/remotes
+github.com/containerd/containerd/v2/core/remotes/docker
+github.com/containerd/containerd/v2/core/remotes/docker/auth
+github.com/containerd/containerd/v2/core/remotes/docker/schema1
+github.com/containerd/containerd/v2/core/remotes/errors
+github.com/containerd/containerd/v2/internal/fsverity
+github.com/containerd/containerd/v2/internal/randutil
+github.com/containerd/containerd/v2/pkg/archive/compression
+github.com/containerd/containerd/v2/pkg/deprecation
+github.com/containerd/containerd/v2/pkg/filters
+github.com/containerd/containerd/v2/pkg/kernelversion
+github.com/containerd/containerd/v2/pkg/labels
+github.com/containerd/containerd/v2/pkg/reference
+github.com/containerd/containerd/v2/pkg/tracing
+github.com/containerd/containerd/v2/plugins/content/local
+github.com/containerd/containerd/v2/version
+# github.com/containerd/errdefs v1.0.0
+## explicit; go 1.20
+github.com/containerd/errdefs
 # github.com/containerd/log v0.1.0
 ## explicit; go 1.20
github.com/containerd/log +# github.com/containerd/platforms v1.0.0-rc.1 +## explicit; go 1.20 +github.com/containerd/platforms # github.com/containerd/stargz-snapshotter/estargz v0.16.3 ## explicit; go 1.22.0 github.com/containerd/stargz-snapshotter/estargz @@ -153,11 +186,20 @@ github.com/docker/model-distribution/internal/progress github.com/docker/model-distribution/internal/store github.com/docker/model-distribution/registry github.com/docker/model-distribution/types -# github.com/docker/model-runner v0.0.0-20250512190413-96af7b750f88 +# github.com/docker/model-runner v0.0.0-20250613083629-6b8c3b816f00 ## explicit; go 1.23.7 +github.com/docker/model-runner/pkg/diskusage github.com/docker/model-runner/pkg/inference +github.com/docker/model-runner/pkg/inference/backends/llamacpp +github.com/docker/model-runner/pkg/inference/config github.com/docker/model-runner/pkg/inference/models +github.com/docker/model-runner/pkg/inference/scheduling +github.com/docker/model-runner/pkg/internal/archive +github.com/docker/model-runner/pkg/internal/dockerhub +github.com/docker/model-runner/pkg/internal/jsonutil github.com/docker/model-runner/pkg/logging +github.com/docker/model-runner/pkg/metrics +github.com/docker/model-runner/pkg/tailbuffer # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop @@ -174,6 +216,10 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr +# github.com/go-ole/go-ole v1.2.6 +## explicit; go 1.12 +github.com/go-ole/go-ole +github.com/go-ole/go-ole/oleutil # github.com/go-sql-driver/mysql v1.6.0 ## explicit; go 1.10 # github.com/gogo/protobuf v1.3.2 @@ -235,6 +281,33 @@ github.com/henvic/httpretty/internal/header # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap +# github.com/jaypipes/ghw v0.16.0 +## explicit; go 1.21 +github.com/jaypipes/ghw +github.com/jaypipes/ghw/pkg/accelerator +github.com/jaypipes/ghw/pkg/baseboard +github.com/jaypipes/ghw/pkg/bios +github.com/jaypipes/ghw/pkg/block +github.com/jaypipes/ghw/pkg/chassis +github.com/jaypipes/ghw/pkg/context +github.com/jaypipes/ghw/pkg/cpu +github.com/jaypipes/ghw/pkg/gpu +github.com/jaypipes/ghw/pkg/linuxdmi +github.com/jaypipes/ghw/pkg/linuxpath +github.com/jaypipes/ghw/pkg/marshal +github.com/jaypipes/ghw/pkg/memory +github.com/jaypipes/ghw/pkg/net +github.com/jaypipes/ghw/pkg/option +github.com/jaypipes/ghw/pkg/pci +github.com/jaypipes/ghw/pkg/pci/address +github.com/jaypipes/ghw/pkg/product +github.com/jaypipes/ghw/pkg/snapshot +github.com/jaypipes/ghw/pkg/topology +github.com/jaypipes/ghw/pkg/unitutil +github.com/jaypipes/ghw/pkg/util +# github.com/jaypipes/pcidb v1.0.1 +## explicit; go 1.21 +github.com/jaypipes/pcidb # github.com/jinzhu/gorm v1.9.16 ## explicit; go 1.12 # github.com/json-iterator/go v1.1.12 @@ -255,6 +328,9 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth +# github.com/mattn/go-shellwords v1.0.12 +## explicit; go 1.13 +github.com/mattn/go-shellwords # github.com/miekg/pkcs11 v1.1.1 ## explicit; go 1.12 github.com/miekg/pkcs11 @@ -266,6 +342,9 @@ github.com/mitchellh/go-homedir # github.com/moby/docker-image-spec v1.3.1 ## explicit; go 1.18 github.com/moby/docker-image-spec/specs-go/v1 +# github.com/moby/locker v1.0.1 +## explicit; go 1.13 +github.com/moby/locker # github.com/moby/sys/sequential v0.6.0 ## explicit; go 1.17 github.com/moby/sys/sequential @@ -395,6 
+474,7 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.20.0 +go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 # go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 ## explicit; go 1.22.0 @@ -483,6 +563,7 @@ golang.org/x/net/trace # golang.org/x/sync v0.12.0 ## explicit; go 1.23.0 golang.org/x/sync/errgroup +golang.org/x/sync/semaphore golang.org/x/sync/singleflight # golang.org/x/sys v0.31.0 ## explicit; go 1.23.0 @@ -655,3 +736,6 @@ gopkg.in/tomb.v1 gopkg.in/yaml.v3 # gotest.tools/v3 v3.5.2 ## explicit; go 1.17 +# howett.net/plist v1.0.0 +## explicit; go 1.12 +howett.net/plist